[Non-text artifact: this section is a POSIX (ustar) tar archive containing gzip-compressed binary CI job output and is not renderable as text. Only the archive member headers are recoverable:]

var/home/core/zuul-output/                     directory, mode 0755, owner core/core
var/home/core/zuul-output/logs/                directory, mode 0755, owner core/core
var/home/core/zuul-output/logs/kubelet.log.gz  file, mode 0644, owner core/core (gzip-compressed kubelet log)

[The remainder of the original section was the raw compressed payload of kubelet.log.gz; its contents cannot be recovered without extracting the archive and decompressing the log.]
Tnb6rcAgJ@+^},K!G(|؉%0y)!p^yK`WLJ Făd!)tϵ1?tƚ62֗!cځt{'3xFcb4Mn-Oqy2pwdny,OB\H,IΒ"(yqP)XJr^{ꅓ|BiZKK&>$G/ B\9T*0/ }Ld'GlfTN#U;#׈Gu!1Tw{j4],y =&1)^i)̝t^c[i\Y5ZQ <[6|Ϊƺ2q Dquheoiu ڥϽMRkjPĽs` XB4ĸ|(@FSTDC6*Z.T?W8}Xy]_?k_ c> ,hC ft] ЌSK@^ WFoFSdmYao=\G-X^e'÷Wm5ڼ6[&y8ɶgZbZ+ƴt[6'+%vc2 &ř[2d1dq~[HTr{"&2Ϝ,K xcq  A:ϢMC8wUzINV|0./>g-j >ۘ3*qV'g4@jKZOee\Uwޞ״[}rC_t^4׸b7޾9}}{ج-}w=p,EZ%{_ԪHSX)VW`]6yjСhs(,W>>Vd2a2aga(q^2%qKäsAK:)BꒅM1i2Z#I*{eY!Hh9z=8ҿj1~˳`hTV_2ItG۩r)^&Ňyzi.iKVAuK$!;o䌙DY/OFĀli+8ب%7XAbc&#LV} ix#+|HR݇TR}jC=b7L=:RRtLôSmjfs@iQVT2'4&q f/%%(NT*́uk|1,jiM Ctx~0E Yۈ1N%f-JFC=Ϫ{kn(Zu2݈uW%g|s|WޢY3|OD4S{X=;wi"zѥz>J^@L F SFt&esɎ픍g!kСZa),%eBDs^I&s04=cRu$VV4dBT @DhS&19[U*P$g q 8i\M-O ͧ@GT7?؆.n3ۂRk62V~d& Me,T= Wo] i'շ#Ʀߦar{/hp82<Ξ9bsBfHkrK~1Yb:, ek e XT?tPF-lԙڄV2 TFj܏~$jO8mթ|*Fɮq]#>h 1dX&E 4 8eQ/u Ylr=.FǮxpR{U% )dMg鼝#JmnE.s7zwCZ%-*FAgmC#F~*sc1 "O5^D#LѻE5[#S "8Cr$3@<ڕd-y0@Q7FXu@z&/yp$)>!ײ1n$~9tWD{vtvV#*p]~ܵ \# cx,\wuR ,p-AfGW$hઈ XH+YHDW/,}9Q(} Pnl ;L0y"/~?l~i$Ȇ=~%kDW {V`QG$0h` X`HgE5Kciuk$f>,\=L}sMp0)p i"GWE\ŎpJٳW5q#+Jy4pE7Wt D{zqp^1):c>Wsq]\Zq*RBY\j׮ֳ}~*&Wv:ҩEnX}o>֝N{@"@Y/Fd-UJߑƟlC֯Ѿ B Wpʓ6nY,xؒ)@i&壀hrH'OfEfۜ2XkTnKTD `(S"I8*c*rd`RRE;8"댉"xhlgz0^Q'O-c@Ug|(#br!cEE)VJXGGhi7Ez? 2p.9nEdg.0RߺD0ɀcZ V_ I'-~v A$%,Bf1 FIH}ű[Dȣh8f4 Q38-0!nl1SF"hoE@ SL"XmL;gDX)7tPh"C3}8H#`> |1عgYE (YbG$e Q;-UkR`9a@UuIĢRӔXRYS^l-E B nH9*eHqʿ8SG Ny#ԕU='7=u΋Z}m]cFČX|.v&$-PfVl`$ȵ͚0o]`y7*ӆ !;FgZM{vo&ݎxG _U`W׻jU\,~/Gȯ,ebn>h׎џ)Ʈ(Rȝ  ǴX[]S.z5qk\ڹ@eG!]-kmJ"@BG%G#x<{hVxM.hzXZһvNG4 Ns%B H2T$|u]QZń*Zr#JPU׏\ dj(N9_% %A3MJ\ ;B ; %Y6֏56Csҽ9V${mAzw~(yc[Gx{#{AO} -uYt{p<:쾝M ZzJs%]* ]pTCNPn0 }hD:@ ՐRI)P(,YG.IB!aIB#dc؉( v#1S`i=)Ry* H,L."|$a3qv' $4]M'߾)Gy|5=Y ݝ[zBz)wz~V( /3{ɥPY- BmE#I&Z!ufY:ԂD72 SpH2+j њ8-Ird^6:ݞ{u|FQb>WE sý4ܧPʅ1#Prd%D(W›( [@ULA%cX"r$3߄puyX+q =>UA KiZIgɁZ>>8u9~l fmN-mɈ䊅xjl2[ϿN5tnH8A%c!.As\dtԞ^mzgK%FQksTz(Gwv`4v1֞8=c;ҮIB+8ƾЌVzo늌7p,NDﳴ\5y/tbl=)aX&}4V1q2aJTRvlT6d0Vl ;|Q )H52CʹP{m?zn)lȧ@2dA`m߉RP2ғ=~xehhcCNKDe_"aȖBEgjKvZ 7g?WuX~GG5ȳkk8rE>$Ȣ$̘ME%E`<*r8t-Kiǡ^rnl}6Wo/2ȾbQn~|G~|m$Eɐ\P$;^vyjtFP}s㾹qP@9E_7 ,i+M Gِ%Ƌ!yb`XL(Pe?žaׅ5+v]N;.t;~'VfVU@hUO,Kbtj\|bsTNn.׬QV/g7ԣ9KКŒduJBHc~H]^W㶣/=WЊ{3Ydɡe_bN_:^D(H% Re}r "޺D>H.(R~X ҙ@)Hb%r4>D'M`7)"&M>Mh_܀<T^}73FlL^ns}D5] nGS.)v?m(DVk*u*x8t~G׳yS=  M2eSSQ-xVo1A.(9jOl.;WR#P-S&"(7^{c-6p`hPE#[UB6K-x5Ǘ˺)5Z2蝈V Bҩ{`]"mDD nFeG&!8^ݢ67/V<+ّEXND;Jn\{;])ֶ,U#^nx@{UQ~{1K^j"|߳ZyugN[0G-׈/ŗ'J&j$N}J?Zlh= e "c#$ vĉMpl@/ f^ ϿJ[7ާt& %S-IlҖd,yGU L崛j(<.tPTl̀7l 陼 z(՝\h?ޛ=;|snRN{\*&N*]p` Fri+V.V.; ;Ub9APץd-TFKFx1% Tw1eF:IW| gjۓ$'ț"p&`0^+A@iMVJ}{v_VPm~'1Oos].X _X~3=0=hbFVxKoroam1Uj2hJion-'i1O򑿄^~|der>NغV;^OW|__&9]m VX4g%3X-''&jg,|O[< |OK|!vBSz8nH[<|>w Nx}'.O,.o= V/judʟ_O/g 3ɟbezv{'<O'''[Qӓ%\O<6Wb)bM*v]҈#M(Ǝr"i7nj@aടw0h?۾3uNmȾGJ$'A>h$hgk9tVl} DHF`Bۆ)m DR)-?8|GL&ed/rkf칻֛(68|dm򫒜~3& -̀~uU{kwp͓%)0JBj"!yŶW$p3+VpB ޙ⩘|M@.gEi1lL@`rR֐VCIhX79wVJA jA\{tI 7̍(om{FU(>ݘgJwHg;]R;kw v5E$eٽwȪ"YxȢDI%D֑q} cFk#`y`"!b&bcCי2M Z"uvJ1 cjRľ*u`PyuTj]A%`m B"LJhEj SvRp\ (Kb~un yv ZPRĹ%*?a^g;$?)A zecQoS^ Zv5O,UPD')#p(r #G\f.Gg--iIᠧ9,,I_LMC\fK|;S},tT^oV}X1du]9|߶b>pGA%,~CzUV +g!gRm%3x]]d0rfP.}}r^n+y4~a~y֟L< }q$/@y `kw({Ŀ$zv]pt3JavW ׂ-"\] 5ZEA,g>D[JXsw6 b*-x. 6PDФi:̔T!IVT}=ه^S,f]ܚi,\Cr;- bl\׃| @g75=t=aZRTD+׆ׯxd?3yd)C4׃i6%byɜ'7yׅa_n[!0_*k_#ͺȨk9+L҆Uf ܱU BB0YK_u?ÒҞJOIy^ l=|4=kEK> [ࡐbPDTIpԜ}=t?, J^Hϟ*<$u. 
zo%0L,"F1uSOU=RkJ8c+X -7ѱ뭩#T+PHgk8*bR*"S"JFc&X[=(w.%8eL3]4RkP3b# &(06`Kwɮ k/RQoښGWp# pz8pF<_MSJ$7@l`?0EN93X۴tgEq, ~/-wy BG`kvֆNBचS8ogmk=s ?`} h u㇓arJi{i+ce1-ϕ.kzX;s6Gd~.=\zm!LbO89aѢཱིq(ׁa(P`+(4t NlqDY؃-nGa= Dހ G1b"P{B0ɴ'L%詴'LQ2ڵ'| 'Ҁ|2* Wh8vuCw%+YiP@ٳz;6^cCY{ UBbdaXbp&sC*'˧AKtg^/ cz]֘" ,ꌋ~"IUgK^A)G}}YGH(򹖜N)bxЄ~*ya (%r-c1 Y\ŽJt @93tn͜XLjAžG׽0ͿyM^,)X wVs6)[Gklsfȵ1O9aLl-jT\gˡ U7^V8RZz"!㴇Ώx4-/@0Bu1#ä F@wy`0iUHwǿFW_߾=J[:*(3ya}S"h"3 d`/XR|zr-Mj9)8n:*OM#jtu=H+>}W8]f#pWv'g'ܚOyQ:b"g:a<0r)u? L =a#J8HQ+qV >Dٌ%UL& F%ɱhQ\1 A*!lhڲ6rv|V~70*|},R)ADj4Ö1JbF&$F %$(#DM\R #?kg*`~e7وG'bT2c8jtpL;?qq8G! 'VGF{ffKY@x@ G+ZϤzPGM̴s<*@cD`F91ցR5&F+q唥q ""0t8F~́ГcX؄xH , a(ku:%P8"4+%ģ=񘃱܍t!JkY:K ^OaufpfKⒼ9RT̀0n#QW>3UJ0L`p xH\14,YydcK /S"!&"7s$k_kC8Y]0~]Rpf堓 Cm1__*ۋZ X$߶>7m+ Z֤Ro੶MY4p΢,l0T>VQ Nﳏ "_IN;rQ^\JfRAzGx4YW*UAZ)&=SޯUg??{iSYjNfvt378Fc.~;VWӚͺ/ ̏ޏm. W%Uۧt).FxGzxPQ m5<4WgMuy Z_mT|m(wVl z3?rijL7 ($e39Cr'25o?|Ffj$l$|G@=^[޲.n=%Np@xCk$wkޱ/.o͉(d7c»T[Q}qCKm,7I{[^|fnd#y]Z٪4Te^J۴@Хe6+bh;sƀ˦zfEtrvrΓ g1Qn{o' N\ǚ&|8nz]:ŕy<@D/:28`J{VgUW”")Uk Wp 6Jw+ulƜrm#=6yD;V"jp "5]Zd]96tHmxLDImtJD>W!"s`t.MkI.\ځКy[r]4:nFM$18Дz}Z)DaO]bse6]k#|{Xn.Mі2Ÿ\)2"eъ 7h/j\eWoc88[krQ;*1EsuW:$Bc*[k[&1ioi ӆv7ΡA 4}QDՏ:z5E򳺚gQ]ܫQ8<݊&ҕhw%[KoYW27idMY|!uGCS2%:8S%i~qխ'k\J/= :9y%ćEg8wY,V^;W^2Y+냉K¾-a$EV @qF [,Hp?䆎 "ګ3gK U7tL^lkzevz"{Q0eGۇxM&y ϯis='9`4SnseQjs~1m#Nၱk}6([8S [uo{tX˕+8}S&M/d"WѸ$R c!vj|IRr%}Sri*Jh|u? ?8/G_^,sGI﫟_܏Zw`n;G_S2$QV2NIcUJ'CON:lZ'}P6ڟ: zmi?u` IK~_yN-diTt̮j/_*)S'?_]Γ$Iz.7?Oӳr C<O[mVN)6QSnZo}K-n3Wע+} FYԐ.}!sM9JH23*gh齳2>_NK dNmG-}{\eecؚM۪ASyo>egEw$h}N6ߪw5 F!T#F*5Qfc$lt#p?T־k;cHڗ $@P$J1򁥤8/r!FnJ[Y8|)*.cz'Č\d"eN&%ͪy! Zuy_ i +)du`jg"gqHM<qq\|hco]AZX G%+Ň6r!Zס5JUIQ7oU\ĝxrfŕ!g׶Tdɜ7}k9XU+ot qKJP!MvaLLJ@GɣS2۬TUh)Yɥ2Υ, ՃirVb2HKBר]3*ta5WʺpAQyeANv[wZw;,j~p~>T~{̬gI=XAx,R^Hf䠂*]%fd5֍bJ$`2F!EQX0*fR ,GM ]c0ǢqǾZWھ=AydV;@PB 1=)I^VՇY :YȄ "+$Z5 J  G< Ĕub@F5P;&\51b<[?ш*jxF5dJg9s@OAh|4Eɳ(D 6i&;Θ) j%r虘"8DIɒ`rtFFvRGҋf&\%EYY/^/z.:9@%0̃`T{bJH9 P%U<^/B/>CUYTh&W)^{VoW 4.q{\Hُ3+.o;_hn9ݲb˾;P2vlݕ1P]w띶`3za<;N.K/vW^~ӪD UXLch>g-hмU?+tL6Q}UM0YEt?s0++`=0(H,}뜓f"Z|!C .k"$4u=9+9E<&wMQHISLPx-&#smj-rvc:gܙ>׽ﮧF\MN|hg;}5ܚnuU[1#k_ߤm7=9犲(URmj5:FJR mԲ ^jlt#/v}=.lzbguLSѽh!',{d(vKXzN: >,i&LȪLȅ`I*KdtT$B&.TNB e2 %gЖB3Tivy{GFņ ,kKF rmpIb]r&!5kdZjR٠&:ROr`jVL_u`cƶ=/4I Aa%8vq";Y-I4R !JQ2TT:L}ΏKXc ./堗d=rRߔj-HLLz6xI; 68cLDgGa顶*of 9`dփs*l\lugN348镵A&$ǁG;A J! TZ*tMJdGo0/Tߺ6Y'b}t^h5NNڕrKONV˵*smd]ZyWN‘"ԔID? G*Ob#ՂEl 7e-h6׃tzyk{{iPt}^6$k_kXT{1]j!|sz~ ;ͻY}㠎_-< 9{`*{ˇ|/ۭyRϜ*. ~=uyŗ^&K6op:fr5~@9l;6y^A4o+7n8tzkn8aHrbd|͊m}o//ꂟ ~>e]mȿ|y̡׃O+{rFVOgWCk>[d1L=o_?arw [d]}8l]Ma(yue"dؽF+fKza3nxToU@!X΃y ?Y =wz{+hn{瘼 VP,]FgD%cO xDσ⩷qr66ث&I"(b`R) $fn2XԴ1Lqd''lf}|J|jGSOYOKʺud'Jucrˡ/]>qRhG _.3ME1Wս8U*n*?յ'Aчey{Q9swy8%ERf} "'E-Y$ Mzr%=aE郂~Bڶ"vkT9`]S7ɋiakˎ[e>۞u2x}~ OfW^U"/+"Aߙ^n< wg8vP"ݻ)${i#^z40)˰D3ن+%&aB,ln "j)UyueGtI H  *,\SHt9 醜8WPz [\)iP[SPҊhdxmglki3bK凞, =VEvՏUz^v@;7v>26yp=o~uulݜ70orq2ꚁKJ~zE-kΧ?up{}~u}>?H/rmJpSgI)3rk^Eo<4YfߠV9A4$`#1[ޚluZJ3i8AEeyXDe ߳g3! Jf.z8B]H L 䌏%:ȠxXTLQ),^(3L{g*s Q$7sʞjl虸.7ݔ^Aw|ě0a4o~Ԋpֲѧv%.JʬNkQ8!Ab!C$OF{ J6XRjR- D(ߚgx}_3cKRWQJIF & S PJYR/)Ƚ=,гzzymȣ . ̠r4+QYRx[|x鈴q{Unl4֪O=mDkB]9w33o3Q{Z+ݸgf1% Q$鷃=qYx&\F+χؚw* H!뗻ꗳo.Dw)gt!:i}ΡH9sb˜1*B*4ndM"Ց'-t'VGtYC5+9*P99[V%X\[em2s< I`(AdA  9VxcV붢(z\Q/@Xt.9'$(oe6iK ~lʼnнx:L2e /1Lg&gimy|wxQAZLX"W2ݕVD-NyPiTytzFp #aj;j; \mG+\v}\z(z@Y_ßG?'3ܲ0azGvQtz?MlkWV|rWye : R(uf(.PwB%~x"XW\Zከf{zpE6B!"5U!WQk:v*TpJrxltgJvZUdWo` X7VH~͌6gxz2a! 
|< vD&O8ab5MFZ z9ȆCՀ'3cݷwbY .A`PkPi{-´s!*K \rUj}|*T^|pe8ijCpE^u ]gW[+ :^*v'PGQT1zl{ZKp Wۑ{77,4Gul+ծAc:WDD*վF;\p& wMhgj0 %]l ~W:^}R?,K8&/?<=QC4#W |ZI!q 3>{nQY9K@ޓbuK]Lh'|:_~yJ>G^ w8RLu ᗣ-յ`w%{PJJgqI )r4GrY Kk5ʲNHՒ!XT<>$3^Dzh,!:()"Zf.l D &&ŁKj\dWƻ,6r4uύ׮s=oFf{htPY몼겇x%Rq׀\UlYr X-5E>ʂߑ? enwh؜aPwNel*73 B\=sy+ >8e/N`sfNWi\C"irwk Da1Jfw&6&sw:$xNJa#BBP L.r &C` 8, F6m)BRƫ9郻Xk,!&>q0xM;O6kGkmTDlTom,Q|<o{IME[Ũ(Kqpsm಍S[[`p۴-wVE_fX}OXzl; OWߐC\عef"Ol=Vڛ#1GYqZKtf(!HI]xLvg5ΡTXJL F 82=C5祎 }pTP>N~ve7lUiY\9Nf!g39J$Y9 GĔ@`Ze[NtLNsuw0*H+m}薭н.Clu/c 7! %  4,5sB1ḺmV^3.#/Ȫ, 1gUu%Xq6{2 `rr4Ƞ#ʁ\uLr h4KB]|a6G]Kl\aW,K$ v&MMW҄Zر *ӄ`CFh u9.i3żL7"o-Tp_[fN]˿SUgznyx@LՌEŤ+69]m >}>n2X(l(%73k}A)ҺP͸3ST`L%lZl,o143;ߠ3<朆:w3lP *8$EI[Th9\"I9vqBM')yU]Q:% 3pRȅh9b␉$39rY*3y%d_j1g;~4yOJq;"h:%KA:\қHyH If#ߒ @ABAHa reI0* j5r j:->zdU.W=YvTӋɽx*T4Z®(B ZT0LmHPR[/N<9i-m!g֌TxK2X_JZVUT^FKҭ}`A)ChY"P0'%>23{!3bUdTFHGYp I#uُN$]iFjGzJ5,b½b-EenE3^`85z0xz74btsq>r6̴֖Z7`e* yE<#3+95"C2Ս`4,I|*ƞ P:( u&af@ЪRfAʈ]Gø`b jW]QVFmۣvSʨ .qZ]p̙䑘>caJH2iɊ"išH&(RIc2v[~_hZ "w#d 9,*)C#G&(x?ٴQ6XEDk8+6NYi.Fg|`",i@-Y7g^'\73:&_g5.Ee\=.pIFc]!7)b+lNYb J\cjܱ+beФ]":cD&kJq42 }s?/q *1P?ƃL%S9D(L2H-(YSdKy 0FlЁY;>Wpz?ihR ؆TviaydڼuՔe{@ڑ6֪iGMGi"G31ғBP4~Q$(&qR00xmANג^GǥzUԅʎ.)Қ xIT :˓6bL$ӹd DRX[c_r+\['@Yߦ?ߗ%dͽR[rl;|HճGդp۸q. $Ʌ [,c5`{vs6CY@昽NԐ$Nmp]fW.T?ksx^`]}<-|uѫ14^͚ h6;C{n7;PyJwvzo޹ w?tM<7!)qkt䠋njfo߈Pl6,y7)RGY2jm͟6 ߔ 4)ӕki~SfЛߔY7m|3oUj;olrf*9B‘6glCth'~R= Y,?J_֓S wnjLp֋z_h銔4/y%9D(';ՇOcQEᣚ[zb5.?zڠד,]Nhuag{pV0?8'ߥw&Ke(>={'\l gݐm#kEm+i>%TJ{Z\ 7 i7ן>åW`,KU4:Wܼr*rP9yxuK/5K/5Kn'^;n\ ~ p',ZEa>xҺCHeғ\iK*j^.z A$ep#U"GXgN@3mb:2}fKަo| >t^ [MCf@?w~if=rC>˥ޡf fUN%!Lc@yF8F(a!)3*oE1UB "@&Ad]$,#50HY>D-PJ`f'`]2F"MJbǾ{N[!!i; @y~+^bEH>~T8-Z%A#MiQ/U>HNH2HFQy \qVF1rKqyfKQ,9YD&KӅ3^sidbBtA2+2HF>J}oRs* }-סEv}ԃ@Qߖ4?m|IPE7n/6)/\cHiPJh, 3>Xrͅ]4! +K߸#}X#Cubȴ=b9 z~M;~ϓVE[g%^:]b7ٗ__֡j ίE٘J ) Cͅ|4kkc'?5awmā?rԛԇ{~´QT?YaGNZQpZizqvVg5Q{[=PrnC5Sh B N o)O6iCة:O׫#i+7v~=IGC}72rzxwG&{ܲ@r;"FcRMaM};gcăy_Ğ\&ч?5جւ7]xtL.`җn$.D4Y=npj5x<7 RvZX*('%J#mpF;T91 R:3ca*Z1T"xT49yPP{ k2)Uq>[bM٪ ns~OvDOɪEa45^џ4\u +nc=9Y/V>)hKiw/Q~4c-ՌC'^&C6;\;&EJn%/s{bkX mcuv~\@>ZSJ轎F}]؇~pP/b^rѾydSyyYPzl TP\=ɂ4cѯ=&ů0zf8<]{5X\m!>Œ9rf=hfe.G3l vݭ~7a>> anߝNŚx|l~?ɛ<m A?7|^qL΂II`xpNyG djȼD|FT^4{O8Λ! }ɡeP*3Rf:h CcJ(gd6~Ae]̺ؿk#ⱎ/|.Gw1yB͏sUoZ>)()3Tʛ\IGgs1@P)(I<( ggƠ>JE:y$6Oui@N(zR JlT"EfI{qA: g9!ZYI'X1ݼebt}[.6АoX%.(,cƆʑHKx*:T9'h w2ILsDNpRerF),iW"I&(DgzzUedzE">L@ӓaY&{`NG{Y>ZƐaʘe3$KI@#Җ5^! }el@2X ] kta'h x3ht>;P2ݫ;Mys_B@J 䡲LJU- :Ʋw;;{߲'Wcx*R̥wLĠv:zYhdD0 Rd7ۭYG:qV>ߣhz,C7vwxۋ~+{o%%:t$!DKֆ%5a HlIMV|P$U j? o/»Ao/AvټMmit5iie¹65Vh<}ݾ!MExSO Z8qU+%T{Yi+X*eBVcu;  @;, 3Ÿ,/]d1r&a} 1L)f N&1?&9hBtǘ0qNJFpIDl_ӹ:$ϵsM4[{^/pX5Z>*[m9kt&~T$I%Rp8#ΣgRĎK$(J~&r/OG9{c 挌w&pOg%}<$s!ʅQϲh812r:h Kp`DŽl:`މ嬷t%)/K8@b&JCq9"wV C[`yx,#G7`P 9T;*7 } S_-Ɗ~z{;ӐtCF $J&!aL6yd ؛dRݿ{qbm X@R}\u~I_?߮2݊Bb~ O(yiy_((ω,^Hzt) O(3om݆7ɂb:kw{]?M~Y~{M_9YOSwls̓\;l6? &/̴mպOŤ7Oh-gRc<88FԒNvy1Pg$ Q(eŸcx=0 !.'_WgYri?O.o֏7n4ED[_P9pKk9Gȳr%=2$2lut) qinR>:KWn)!MA'|=KɊzLSq2mɱ#~=٧K,?0}=ߺ܏sQlysmZVtQgy綣F{ f;:H@)P54W]. ,hdqT  a]t= (…jU;aN ?6ZupTV:BWTje܉m;Ңrg1ЏU/T6Mn Q*Bg  JfF8u*nkO;vbCeS`G(A 2r@$KJ)!&8r2샶nP"HJZT(*emgJ̅+422AI&ٓ0q<6롤Lujɸt^Ktv7< Op<جγ Rz|}}9~ 1E sYKLY!hB+%PE->7#fc3.mv:[ʝﳴ~~軾jbh6|'?hhj{+:lNW5H!)%ǕAM$B\Ⱥc "( aAO d^ iX`BӖsR-<v ;ap> ")Y.>XN}˸qi}i_/16,qDG[m4mQ돚unc!A9i9IB5YD0Ɏ(]>^Xo>;% 2lB = (ONt:g*hc\tAQ'1{POz}A3:׷Ϗ%#f9;>Ex{#{%'b/Ehn+F(u]aYcv\tW.#JjA( Ϲ5kp>!x.vJ'ӁkҀ0A~^~ HYi*Dfv6k-q >]IEE!AZW['Zԥ>8>[-[*zi2_;UM 1_nvO1oUޣ6jmiKdt0"c! 
y Q~muikr9VR?1K.$h삈>E&E͵$3]FmXMݖV8ʶPFrm{ju&%v/yN_;!ff,8# ].-%ǂ&'7+y2 ̐&]H4>gGdBb66U*d@uY(Sfhle]M;g9]muj]eF="ح8E8:9-Jzi$eg"z%i'Q7r/ x "!WELlMLD5G%*ZF8ZľlJk9sL> D%%%<LZDg9c((]ֆh΄ЪDq#)"yB WX*=+[j춈_=@vqt5~VɡvQV|]ܶ"$yTQa6R)̽`=1^;%4(AEhabCllizv\*]{hx 4u$1YV~I@Jw)ݤf ªBc)rdxb48R06K}R&%-F'GIu&0o]'}ʁ1\Rd * Q:"𐹬*kCb!'6LDٍB7,;)-Pq )/ ZyX+$e&y >FJ0]`yeܣt AL2#PeMǸBDyNPQ&\02K=VԮyvmv.·Bnm9OiUk.}Bۧ.~ˏ$\0^?8nht{et}zIolVal' ܼBm̷wOޱ1C!ost;5=;&Ln٣L@z窋ckin.3t7o6 E혗/ Wy+znoJcNȗjʬ>}bA4rhѹ y8zGa: SZM/!=8BdBZ&50L3)/"LB`"Z %b1rݔ8t,mokxڌێڿqY`&Db).5){ A9Hd2BRyRЎˏcZ  Ïb x &)ĄG 3.pL!:,#3RP Y>VЫ4Cd,}?W%!y<# R7~|EӾyJCӋ}cKEZҹs"OH0)ަ)omOonNտW3d(Gt7#|Ϫ˯{m ?Ph7}w,MB旗m*fn*sD%)r{XCF)nW^me#/, Y/#ʅG /.NqYCS& }g$RđHc ͹_UWW0F4=B( sDI@&TL"B39 2FiYpseL~eh j-4,Mi+E&/ѥT}gcWv]'vge lF;Cx6+Q$P! 1"wyýa{H,YFO]1~|Ef׼xt4^V\ϔL))QS 6qVCHEE5@{) >VnfPaʺU)'pDX@7c5=PJab.u\}皞5m1)qltD5XTҔrw+*D`s#*sS|hH*C'Υ(^sJ-qgy#qS-q͐3j&d!ʼnU*o|wP%)[b34ʜ_a9J"B}@C|G7[Jj7>xG;VnۙRszuaO:R=%FaR=O7&p1Z܇߷@pW|-\w,-i^s#Uu6$5hXX(5骶i|occ 94@b'f04̳bѯ5Xk@->?zÝbKA!)8y r J- gN؟L_T]ՂijI)WE7t1%}X# / _U}d%y8'\;Yzeb6>^}uo{zCc0Yc :c5SԒ%+XdZ:qCLx? &* '"8r\p dK0>@ ͤ[je} [QD.T_}8kڠsXՕ 8%H˴Qټd-%&ªThA 1'XSQxQ)MΜtEI ᣕ =w.؀ oHRRLkKCD/F '+76H WƁAR(I!F m6l;ȹëׁ<|"Xai )&s֊ x!7*b4()^N]^MihJ"r A>t D& .NBvبC4J+̝wg *- TaOu>0LM<-1ʹJ&b)+uFG{J*W]Ia }ำYt>9QOM*JY":Z8+v݃}[JA[,dPS:pH6`d9T GW]VMIb%Gun/rz;CN^NE^a4܍`)F/?ȳq܍\p~~r7j <.wRv^T\X}pѣ rWYd~OeG:x]X3.h+ۈihsd{)$/y/^[pekWhWO7E޼`v?0gjqQ]gpR~x骲Y{ݍ.ݞ(%zGAzEs2Gz'i`TƆ0wmX(aY2y YJ.zY:uOgŕ?$[Zx6yEbQ pVƜ f/mX LkLHڅhB&eF2 ,Z#Ձ`DMlD]MXNZAX/|jA#K\Юf a*[C~ߵ&h8HKU60)yH9&~)-8h q¯-!&H2%B,&R("=hJ\A֠IE'v&#*MhP/h XY) ,UEY!"D$P:ъ8FMҨi3XE۸|F4$.EX)ţ0لFLhW *Nˈ m_~,ѓCQDДo PTZ鸐F4.)#D0|g4SiLYe(ީG{c]\v&!hY;kL&ٱz;8q-euK%4?D.@$t&{*611Ι|SA7yamT$Rr] ؐp)x]v ǪpAZP1@CLQ gRXfAp㨀}0f!`T}?G?UUyNJ%u^Ͱ,r+j*5'XOi[^{MlW/w/}P#9uN騀2-6׊]#WE?-n hG2e12U2Dd#p|OYQ%--Jfhm[u_GMQq?r4 }6%|b`TQL>NձBCn)P|k_tq}m9Vu'W]S4Q;yj R<>Ag{j#X=C~ڽDݏJF:A5sLR=sUlz&&?sUS+aJ\{+ә"]I FU,Yi7B ;dċGgJ\l^S՜~ N#L"L%PZ6^`9'?w`%9ldM8FmF* ld7* "8]mMh哛vy.^_kZݨGW^ک+G"(it,d! Jh`$`F * @S:Q8]W_(#i:x,ϝVj#LvPg%V{،qmR۬u[},a݈@;w_nmm5JkmcGŘO3X5ŗ ,0;;d X%G_òW%'m ndUb?TFS{H-ϽMRяe)͗OrnƉ &*)t*;@=z /UgϹ@IjڗW% A+hԁV r|I(ic32ZbKL֧n쉻Eۓr?{apj3Oy-Up?~/_p5&zYiMF2)jfNeJ6>T"[_FQ̫gg~g˿́3UWX8לo#}YZoi+&>v,|w4|+@W\Ƙ.JL\iEa+UO\iM&$&&f8Yl$a*DŚ+ I^Hn'n` VߵШ"]_ͳ9l ȋKcq|[^.RVIc<:: w}(\^}n^5DPDL`@ t 2N" =!!xK#(\~XPld/Tnͣ1U CA=HmkԹ_u!Rh0@Nx`!1B8*gqox!1B8*DEш٠RP(dԺ^B, A gKC8~9=GX7ƃON<ew;u#ح={m$w0qn_.~<FP]b e2%g)MNHY~et)ɧ,~TQF@ >HQBQ:A#+UAuuκ[?eSW_?oŪLl 8EK4YY-51Mip8;&_%2zQ!@5UW5MNha(W eqsvg gA(yRdU D.$!D3HYwXY+AƔ{n Zg) 5R*ܥ%X+ZRкlpI(s,8C`p0@EU!1qAۘCIֱjB+YlVd DǼI- &iYJ@f^F֟2Eb8ɚ/!mr~mGKA6,D{_ˉNn /kK(Z VhFX") LuqSh+9D碥bCKHw@t@,][κ2vU:Iơwl mo j =6x֝x{ M}L?ŖMV0 dN1K1!B-?b]LͲS%{1+SRZUC Bv*dW؀@lA*bw֝;ybL:ڡcz#ح8E-$c^Q;ͺEE(#3 =1AAY%uj.bF3dFE&Ƕ&C)EʣeȚ)2&;aKѫa<ە|bOth-bo7Hcs6"q+1,* Ȝ6̄n-bR뀹12Tv&{'&VDf N֣S[κ"~&vq̕|IɡvQwleo{Mⵒ*>D X J25}me1gdj]<]<{ؙtjc{#emmMNtۮ;b5AaAp}㉢2tPF4|k3A04 ,Y]jr Ax̐@}gqCvuƬ9SԆp2;,gD $(0KZJȉi%.C4Ɋj-#=nMW9R"kp޳Yl-5R+å}_VòdzCw#׹&ڊ¼-Ahao/]T-}{^yY־:eW>9:R2ҫk.w62>ཫ5~@_w}Lf/#|1fF2|q3ݥes \pd :h}yϲ|ÖYwlIf`If -TՐi~hxbͰݕ,5j{nqD*7y}wyrOyy8ϰ9h3@[>>aҩA}g*fzܳ *[`2}4lf7 GWq]g: %ϳ5l. >_U{@z|󽓇pTgi?nڷDJ? 
lz;n{$|/I r*b1`FFL PTUEBSv(^玓p4]~8?rHB HIrR:k*FE2<@a~,fOu?5J&<Ƥuǘud~."HO^$U}iBJ'm( z%eec]i OCCP*Ogt~%}P҆%*cE$0>G0gL)̗o(YJ) tW> N;,mtA pW({a$p&M&R~|yGnt3goKDHyw>MA\Ȝ҉@^섖|הє"*;M‌dU!K& ;P5aF*җ'+J3Ll) +1(R;`:d/֫>$'>JW5e;!vR`\#a#i<&l5!)BgZ-+Kv-%}(I[!*% CP@4r6?/ d m|gw.}ͻV↦{rvyF[[\^ HLne[ OzKR4up={^ߍ0IRṱ' :Om_[9e?v[|RM1%5liRARcDLLݜ9S7ԟf[~26\]]Cmu4]ͺm4#x&4Zk0ZDIG"[]EUF#4Be>N= TZ^%?b UHYX>"BǞκgb"2}.J8B?,m&o=-ێ> j.K^ϖ(c}أLH J&A0YKYIzl#̫Z GUb|Ng1 %I+01>"mHIdh 6dkUA:(lrGEdT!gښ6_Aػ4\jkd]SrU&O^EP I$ 1it3I DpC@,s$kG5vK:yQ^қXY% Zzqe|&_=s)( 9M °ütHd (E).DŹc}<d:=BO< yDBfqL8 /W+f<-<re"< $sݸQK`dp?1r֧@>2 v|H|&rw>m=c"@{oM;DЀ֕PMӢFgm{Emn.>͙l^jēWn3qOS:1&ԧ `9F_(`X0~?$39LIП 3)&iĈ㸋\=]G*H 2B 3fɓlji|]H왽@YN>f#3%*+\NfѤM,R|efWfG (^tp|s+^ƌY?0[IJ^6 oD%XsrH7]~ξOo_;Lu$nFr іPve-IhME`|b?u >uфu&c`(Hw[dU2-( !O>}*" ߹q !ӕ[TӺ"\)S)qAk <ٱYx̪NqեcF>-e(Ez<*>OHSm_plrB>M7V *eQ3@_S-A&2.f0[bN(` Xjr,=H9ê7[l]93c٭| u{'٭JV%yr=L眸=v?un*ngLiR(υm 9 e & ¼&BP5$C*1;u[mn$9qq=鮽˺_݉>i0 !-r SO\>FL i27iO/ @߃Apa(RO0Fk5z&l%i8]"q|߽8nKT]#ظ8C^ю֡:taZ0kwa2UsvwT0Ŗ}\q!+yTrxHVtE;<$tBI@ &*P e5]HXZ,3BRgZk C;tG]k/jI&F.5Y5k"SWWc.H&Z(݄Ң$īZA9^zouDR{@Ak$W]jiqi":pknkl-ipgJ QAZCxjWOM^j:G,`bjHo7ŒAPI(aaDHD"# FT8G"W BJ(2gP "c7KBP;B} J D adb[PB2^T"P7'F)H"U SG'"f]xL!UML-7uIyOzZA(d ("ȃ^`l̰8hQ+`8ZN&dG&pe:N*ƈrb Z5hEf\9ed(vDxVYm \ M}97??'5fW=a ~ 7_Fi˹R rj  FŶb)`Ƥ5m 3 +$T΂#"ᯠc-y)7{Hl2ۈaG+>3`?P`L CJ(# ~wF>cY)irސϚZLԺȷU4;.xF>ۍ ާ1όp6j# 0-( FID(-h\m>9xHazmEy+9ܝW_Sָ,7y_5|fÊoo8bBR50,:(Į@eUQ։K(0ʚ *z>ylfN~].KI$ 삏8X ~zSSu%6هaQK:Z nTL4Oo}(X$|,Ǯay:YJբ'pϯpԄ\%>J*r*pժp`PTk+8r9kG*W4՚<D9/Ɨ7k1IL`>Fr2/8@43H~Wt2/zLc!zgK$[{Dp|B0 K*O L'nөzHRPLAw%OB@!=|_Og-B V a-̃O/;XI%ME\oS=]`zVJQ,)oeWϲ[m1T6&?ͳ[28|cP)C INy`"R zO ~/娐D%MhAɫTFgwaZɥu>Ah*,LsSB=˳MkzvCiqljmiZhw5;{=xncB 0,j]EOTnauS-qIHo=S fSš>=Viꧽ<}=dYbʶ=\2` 0J[p*,OLvJp5•bhrY}bUHjLgI,a r]^klͷ|r3W~G\N~yoV8hs&Eaیs l1n鬜l:AwGȹ`SXt*0C{@ꇮ8u nVx7{E.CR/dkajǬ &M A`I@Q{}aOQ8N3/<ѩ5 \&5=o9g<_ɧ [9z˿`Ѹh6n?Lvcj<ƫ(f,֫zKè%pΤt#N. ȨC `=B%Bk7{n[\5C,W~lnV"S7S3J>+tk`|c,bpR׷E}y!zlA(zu96ֺSHn+$EsNZAa.!DL O ɨp#G`_e=0ɵV3>X'fEEKa#XhhG@?'wax>;Φ'SGq,w0b8UbnbAS4R EN,w=0|-@ .8 [,Xh]!P(V`†TYaF>G+I>.!;5BifjP,erV`OR<ͫ'mMsa0,0/0V(хH@8wxlLWZ!H 5 '`E0cLžG`ߝhl  /y쑧Iq$[[qqZ/>2 vvQ#0SD.#·-gVD{ijwD e4-jtцᐌQnhf-KmxRƏBa0~Hmc}vFj"YG'O7l: ݗt1 p_N·8~ y4/Hv!Y h%0ٕQ59wcWX2M^YfTO~&y҄|~yـuфu&c`(Hw[dU2-( !Hx~O Gs;W=!t}!uE(M._PbSu6b(߻SZ ?\-g{8W!. `pr@X T,^(ˇmm~ )/Q#klS$]Uzk/o5dN8y& Jmo25%A>佊dP~N?\9BS^Aιeje{Qv+w$687kbĺYɤN08#zNɃ^7? n''zVRuL~q_hVS5>neHa_+gM*,  k_F2a ?a |kE Q߽}4 7*5ؾ0n% kS4W9wEtoNk|p鲾SP e9ʮ }$*/ eC1zw_$CM vj_{_vR0OS?;ۥ׋82⸚2^{?hv#ɼ`BoPaË?/j-lřo\5~> ]d4noWqo??j5 WH\4:P-SBaB`$ 8_sռwz=n{K Ukf_.YGI +|OXdP=M½pcʏ`b_=jvdƁȭ{fő>oy'm`6An3k")o*q^ ۄӘ1EE&fQv-c9pƂ[߰Ӯu}˾цA7؟k߮T7ύ!:{猥\h @j!I+S 08$Q8p*ĵ o$G#uJo=5ѪD2uAybPV2epN*5Pmyn1/Cvj}!8d^^G 2hAmS1=^}.*P}Fٜl9&s]挣DٜlLN?!XT22/)@IhW \#u@9zNx'?zXCZ3vM;S$I \x*S.{j<'A-QYU6֔b05)c$pqw&AsULZ0mi ѲRpNLҍ9?dk#|>ik.7N]ݷJc>_:usgYn+t]`BU+Sbe$K8{ː#E@"50|G Qͷh(;v8q<*""}?z`X_#ߋ8 wb1z?VrV| [*#V XAm@n5dս>!Vb!L|rH4&||4 +kǩ0O\G!a׵\OYϽ]߽@phV ^zV"QLûJ '<L'}nmuet'-{~n>ոh׋-ɽZ-$ϊu APU-q=T974wٮ7ZT5YoXh|].]աɾP Fd_yzȸ{;=Q^U7d <\0r}g'p+&|2pP׻-ѧua_稷4k@!XY+’I(hvPe[-AC}jhRm(E=qI,ب}$c.Wx1QB)O%tI$ԏ(ASOu*583ixs 9꼳$g%9!Qps\Dp=u[73W)}oZ[wzs3R}s`(|9-KIᳰ㉼j%oU $G v9O>ZyTKsn nEESgT\v1PZFU qF$-vs8BT 5k$ KT%3⩣%НcZiS9[q8uW }?>Sv>9sɵ3g/ g&Cg>![>afdO[rRJ:x w:OI&dsysy'zL*) -c4f%ɑwKQP)8Q`+(%tqKBw >LQفrD&\ >}6r!Yo=|&iyjb^xer[QXExii 8s𲇗=(̸ r"‚O5LG[F_2M[A,:FQ 1PTP9E>STHY"!X(b Ɂ`S12lm8'VK 8s6Zp,zvsܢXjY4֭'tx@d^x3z=u t, _ңE1 9}yy]`^E)=+F0OUC5NҙuQnpi<潒GvC{M67=oAN?3 Υ%A7@D%J2yI\uc+4%EcTVkP/]"IϋߺHT VaJ(=KCABd$(AIG+S[/$~O٦ c . 
ފFXiͣA` ͤq-$=-L;ykD|'v}p R)(\RV`3g4S`hTL'{: e_tHlRM!._@1Hd!w6y&gkg:*2OeuΝ=N4o~~:y<ڷ\oWT%ɫo oV "׹1^O7O>L-~ DFp~w-O<_FɶR|%E92+FwgZ{8_ GGK@ $MvxM2}gHDIG4<=]u*zdJϪz*ۿ:"Tx [ \ṻ4tqf@k1wW2fϭ bCG]>T]f-ꇟ.< F,;jyN=d4_P|.W擪nSx!z#qX|Ƒur2B^ +ߎMdmpD>u}V~g`Xs<>0sW546$cl ~%Ogk% Ti]]ءkM'!&ji.*`w4R|2 vnmQ1I=ʩ;.ͻ#E%>ȶhqKؿܳnm{aәڮ=˕|u=@wm؇p{9eOhimtɝG˫ɟ\)В]j"u qf\J_ш[(&{bh# Ep_MJUq6hhG{'.1?U!cjRbpaMG9KMm Sȉq˲Dz}|]mM3*s'HLM3UŊodu;6Oe(MT07ЁXˤr>ˡRZ?.0 Ȝ݋2JJXPOzsٳq>O'ۜ)|z6ɳ̓Le9UO]}>s؍{q{f֥V3J+:ƅ)$9zN*s`,yʢb igY_Y]48^Xu/@'y''gzB\[Mr]|WF-, Ɵg&"Xq%tQ2Zktx'BC"ќ'ˤDDJJQ6:y+B#5AYɦ Y◄Bb'*UUZzf2FgK'EzXحo{P֑#&a\GnԇzsbCƃ0Yx/Dh YZFxbJ')y`Nžڂw[:&u!F J=>X54rg:x`Ohfm{Mc3gKvT-0r|[b8m{ߛz2  QSa7/O}:&B8dty%KJ!>EΩ:&xOSI ^skO[ oEQg+Ҡ>Ϙ􎰔琂pRrEhWaWP;4vX`10-/\7PzP2ct4DN$FT2ѡ1au{dV8814HoU:.*㼌%!yU;\PlD)Djb (w Z̑J@mB[|@2-km #{)M_Q8Y>κc[~M9&f; 4piQFPy̢$Jb>+ oR7[3Q#xV5ǿhD1oz"`#M50&UTq+VhLN!Du0&[Q ^qVwY{ke <,Iqx-+nʎFYDn1IT9gIT 1'"]zsG$bg#9YM.$LP hF&J7؍yx\̶vcұ/kY[)̈ 6R 劣k⬷j-0`rG 4\5 wZP@-C E{rMhJȴh8BxtM 7a>fLc}1Oy=#+:\+%]JG/R㓳#"41,#M 785I*ʃ2ϸ༆=i[ES 3bcp?$ǤFGIP9oy)x-|ؘtˇ0Y (lUgM?Og# 2x 0: >@яLk٥BجUn{ hW g3JB믰кL+!BUWUFkXJpN5xĬ~> ssRu{1ƟV׸,Q8U"xLWU [4 ^l"y{11R^l'~!x͛b~pyZlY º_ Չ&sJ}U{^~Fz; $Rr] ؐʜަ 2Rzp2VAqv>n-|ԯ_x%@4 GJ&խh2Gj<5cCt+p ]!ZNT*dWHWMw=ɭgB^l.D\$?-[[ΫQ]w+D ~Jxw~M1K~!vQRdYT+xD$  H!FvgΪ/ťLX QΙ{mɶ,k^W[&p+H%S钧2֟ܨ=z_Ι`h Nt.nFi۹Qʶy=7? 7s!$+.ԓO}7HW3B䲧WHW 7>|lNp]+9t( =HWB.M!`Jc]e3`FtQJի\,h-l\dit}{~lyL|u0(lceZ")BW,Lg Wufhh(e OW]ZAtg*heÈ2JOFDϣn+ )aLh΄@߶fv-y]DFtxWh:O"g_4mx.; #~|.yLE5 A]A|6WI}ﻄR k򷨪W}&oGލGӢNB7'=y_x7l*TǼ"?yg'_Ж?G57_gY-o/;-ϊ[ 9:9󎣅2 1|ME|^@VT ۋ*):JZܡsko_)|Y'ygY3 .P~ʲ(.;c5Go^oaL9.̱gǿٻeB5eԵ֔-ٻZ7yMyT^A됩Cw< Wjm72J/S=1Q#hGS(r'l@W}QJ$]!` 3t B -m۞{z+̈ ]e.Z-NW+FU &;CWwL*WHW ]r3`m:CW3h muQ2k+iUiDNRu{dA0oN훝WvWAb!k\/"S/sRg@FZ2=ڮUnU}~r{}ᒤn5v3r(vKH)ؗ.Rr]fUoU0E6}ZBT`V[H Di%W,('/oo' o&ؙ'+˷ zP ܯ_ri70]R=p7{*̇^eClWGԦz-Ko~dWv~M{໻nv!%+AR=+߯'r|zr6YR^ R8c>0¡!'2sf vNcR>=LS&P\@$SB3DVHu D&N/&Ñ]'OWwN|:CX֚wu,e(N_Z::>?fȩK "%"Wϩ:G㇟WPÎǔcJWZ=,}y]moG+t{ؑ@py $1~SCIW=!tSI`Y4kg~&ubyo46֋}9W~YeSv ܰ~ 3lj&K˲yU3I2oE3̳הwq{v%Й${m%)+xC bՀ3㑖bjFukߦ4Pqi8ң͏ x|eoâ^ #lF:]c"n6*/n.´X@BF߆|Jiu3|T1>@϶1GDv10c6l`…U/bM$/U\;ڠgA, 굔y(mfgT]iͰr4_<͏t.rl` qRP2oY󥋁R dP H#'p;f}ɿM`Qn;x`KhmF l{B]:_ŲM/d~חOFpڋKN=G. 
>=XX0cų`1`-[im et`E刊`) NP*:d2l?S}Zz毯7uþ)s]1XdYK*:^NL/%u&VSOITC߷:v8=#pUT3)a`mw]^Be90$Z9IDg ?AW ' +N(V h3 { )˸p3m 9YS;هJcc-u),C0!B*Cajj0&e4zl"hnDduk'dRHD#1h+SrŨ0VK48b0 VkD\ฑ():QCRf@.(qCv0I$r@eٺdFsü㩙'?k]֝ۢ3DOm Yƪ(rDyteO&c;$h}xiN.)GR8zaŏTf9!,8!c2#m=B6pKU9ed,+2 %ZReR>RLzSNq飶ěU%g&0r#cG|\%fӌCPeB1`QGilČ;>bڵAi&f2v."QjrV;5ZH| fŎ0!jE#qAŒ3 j-$& fh/#R:"*3bg~ĶoٴP֙Q[=0؝W#w?~;gILM'6Nvk;qJoMO2f#o{kKbۻ8ؑZCj%srKeK(Fg`k':A1qV{C"J*jP+uj+(KMźפ'i,ut3]/]?rxcdx=V=K6-եs M"(RdKJY^"_⩽_ɼ_$8i&l} eT hGT!js 8M(rnխ#"( m:\WHHps;_Q蔼_0WvA3gۏ;i5<21w39/y&cPvTxHT%Tκ ug' ~r'[ %\S"tM - ;+r,soBMlg]cOͷ<̇zOyjt=&|&I8_LM'}_U4VS H5Aj]"^f)CJaK.%G{F75nIڽiKh݌?.w9k۹2O-Khz/v}$yxX!yn}Ht[]-sU.l>1bN~}vR(mҝ8bp'>K"kc gr% nWӍ̸y E[eIy%"6:\j0c`R'nSԿ8Mc5Ry5q3Cp !$2R%H!U5)UE:NXc,J8j~Mh,c4Wvqs*;%K <\v@غ@a* nbaE Plk\&2".j|J `V@ .8NVF,.` QR 10 HV`E y ҉hpn;Ŝ(areN&SBuByK&.zx$#'5aOQrW71ux媂vʁG2+<a yP RCQC$08wxl'TŅzym#0Cc 0^f Iy6Zx<s60"<ȓy]7nA-18<3rY/>2 V#F`\FƝA[jgVD(0cgdO-:l_8$2S_Dr[vcėrxm"BwaS혅>Ä p$=>rQX00~a J 9i"VJ81t8!Kۑ3d!PEpAPA w$E5/`?q^])"^z3j~hҷoS :݌uA hST`J 3e }c6Q̕jzL#YI*^%͹9TuFύ8v#w>.L wɖNף$W ֫û_dpu&zu}˃ȿDXգR;-$@8tzQfM@d;^6t} u+(&.?ߔbE˼Jw ;j>^[IjiەWHK[?bvkD|O8:'*Qw@Z)zxZW+q{Ubh^V̤ym^Y{uGդB,)e}@k6q6;NI}cPc>*l`!M>,[e0Yz?yߓ?w6j#%da&f ,Q݌.iR?}=㿪Yy.gԏ~\Rn9ڈP7 YS@4TQ\-ڥjw.D~b޽C{R?7:,K(78>gp`l"zG#3b|~aA.䬚zԴ߀X7f_Q!K0ټZT;_n!q҇8Sm.õ^G1:#31:*}v>ogĄ[*|_n:~?t_S`'Ň/hAIK/<"GɨռѦwz[]{O?}Qf|TuJ^\[/iۿw%D }:q33cO֙S<ҕnw ܥ6r)zfHm|DϖK5iZGubn{D|owS'r8ɭ8>{YQ TmC̤Lu&FRSlY@Q<^v1N $=i}B()Z"C7nr\| Xy\ZA{3“ǪS.Awx`( & 2@Ԉ?ҽ{q.Ovpm鴀e5TˆIjRx(QWE$ʰ2xddJ <(t{MјJYZQ4=#!,^-ե*l.N~~nh}́mY8i;yiEdF+uU$V3]]9EBh*u/'U/#Yw'SoڅuʛsKtvzЛb<_է2NBu4ּ!U<-\kO+*jkHۯ/27 uß&/gQEZ77O6^+޶|٤lOx3iS=$8N?9?3i+\<@rg>on}&"fYoK3j 72E}G=&gy'.@ 5ϿIz4z3.2 kUhk|w3cNEp!<&*-H8.AB%h6a^wڻ?ԑy]G[N}ABQ-|"s 8Dg}LK +i1k aNpl%e%4PўѪܝ~SIAO]V6/|JaU*o:%Z$a =b'='C0SτJ K<*nL\/ސ֖ дLxZ4c iM`SlT,WC(V?è>YpYY\|$s$GY>;(-Gu:)ѺAz$cs oCdQ\ܠm-AHnP Kw`|mc>AM:X&b߳'o/r$27;2P䨍ow"Rr4}99p9w#ߟ|y7j%_j^TQ(e2?VTQU 9KU޲'ړiX/^@"/. & mn]wmޕy9,O!xm Wذ8Q`ٯQS=]GGc/ߍ'Z|.3ɼ6PAU8el 2YX|xulqyV4_ }<ˋx1y,դ|^λحp'%gKM+74o zwt1*|篚#nL౲%0͋/FF͢Tx9sU7ۃ_w_>. ̀9=0"ALjMF;AZR0IC21Qg1\s#SM.ν xC|IN{A>PgVfbW7(H!0'oϳ?~w41!EЌhDKǙ-fߢa0h64;!0ٹ!Q! sR R[.QpFzkNk mˣ9~KQ?blگQkuxƢW m?duLJ[&EVzLHh0-s}FTq[b_ȃg%H;o'ho߃=Ѿy`fEQ]3p6跃}s$Fې7t:")S: tB)5'!wI u|7'z:=J!ud湤PN Dsh!RrT^d+gT>6iTq WNyEO&'uɉB cE0u'P)_oU} 2NH]VBO.UK6/Up2!?  
D :IAH@8&G+ ;Hng[^n$xZHJxGibdi4ශD2 e9PYjm%Kq& ȑ"T%`O{Qi)]ޱuFΎrֿr8n%8n`Tܗ,kgXCVRjQWd5o9c%`.E PG\Y52\"A˔ڨb1.U"rMIRT8UCU_`'Ww:#*MhP.h XY) ,E#I"( ptBO|'U趫m xe{4jcLRYԷ1PuJ( u6k4 \E5H[w-/ۭ8_g,7A< F[*t\Hi#K L&M.h%SV*0KA<2Vo}8P逎rVoϟd𖲺ln*VD?Y$ &G 6ha-> دŋk0h[DJι+YR#l !.N]cS~vO:Hm_Q\kJ=TU 1PKwQ߼3&H|2ʧ:Nܦm)BM2v>b~zy*wȋ76T|y'ޗ/"XiIϊbiМtuQ660~lQ2!r3ɿ|H|ȕ䯖bV-v-b;hZѴ 'T(*ܪBQ\NPR+{LeP( E?#k!W{l;2\G.7DžpڏJW|\:h N`U&UVUR pa\\!U&A2 2 WVW8p<Br5#WZP}L\=BPA}+wL&'W\s2 RҾ;\e*zp%~'WH0'W\&O22"\=Fߒ(EN`-NGԒ2q2{^H-gTNG FӧWH0D䪓Fj~+SɆ`ǩgD*7"Xc~;.\GdǁT=p\:T*k82wTv9zT7L tr΄NJ̓40G3 9q:."ImrKZW5SvѐppjgxKY mnDMgx`%KzB M2:9t"XYPKp j@ahF ZuP:8e!sNl \&Pֹ|,q1PHip6޵qd׿2;Tn=?d$|0)38 gܙEY[̜u9u5$?W:a}mͅ>TCA97I➷[Lp6yCz80 ZV&:?%{j%{,*&#i68v4GD3YFu-zP-AT[1ևAJk\3: kJzpv+5RšDiY{n!3q5sP ccK5cEX]U=QKʇ<>P `>M߽8d ڦbҥc 4gRj2 աL6( cUE61b5]8fOUa}pyĨt ɣ A>0 4BI%2_7.ro!M%8uY*/`T2XKwh'!Tr>ݟ7Bмi>ϩVRnm5ljx7tCOF8%֤i`ts=i6u38:AϚO*aی UCh%hJʗ VCK 6Y/*$UW8k /-@;I{m'jS-+~fW!'c#ZZal%QEvtL:ZaB B#dhݥ=з;mT j|r`)P["@EE; N#4?vÊ#_a2P.AVx(e$cbu±e6P6ZBJIـojOc[pf4giF}/+pՔzpEMiښ膭RT$Tkw˽AAzQ .lj= RL1L v`[jVَr5TXvU s&E0 W؄:GiJS<`NmO J+YX;Л!ՠbw(cvR)Z( **JnxUuAS[K1 z,,xO0LX;*q bp0ujMPPgk>dDu,<l34~Jo۵XaS1D]`) WRnґu* 2}iAPSW`HuWA"kfR<Ńm  e͔`3d5> DbtۃB6zpı{ɇ |fʠ^-=ڛQ,KT3f%4' ǎ1K=Ik!pBKw#Q;v3^\7jYb^F [ŪdG.0QCیZXI| x݃Ky@G *}vl|V2G\ t56b U&5:Dfa`ܤ`a {{[ɐ4Ra2ӚC5 !Xq`ӥ`]M|ü &(! _15lmBtm:~tX0PFuMOU;Rݙ@vV/աHj5W ߯u'kWB,UǑ+/b Vb=ѠeTA!DAy1ρ 7DR&/ ā paF| $$L)ڽ,=iq[SAh[R} y@oBR @XDjFh+oVQ`:#,lţc0Xtҙ$ fZvZU'J)C2?Amj Dao<d<\oFIeXEcA9QgũB`ƶvf]%PFot3,*5:k4άfz Ե&T829In$l&  |4|Vsp _qkhʾ0{y힭ϗcF˴e\$?Y0: tAff=+[{ (j\N> >;Zma6fѲFh 4M"c7g[O*3&5% <%:`O~@rXj9n?T3F U줋]NXSAaKHπOt4JyxHuX뛶&fc_ >N`E_1h"^uN sXߑ'=CE7L.Ba ۢ ҺGIՍ0p \:%XWXڨbjsđmǀ洁6[[rr_$xITAZQkԦ5tvb2bj=ږVO{*6LLOfFZi.C N> VAP4kMyaA6X [̀~122.O 4%AIzRi0jzm(u qL:5󻁷 NC`H~Қ-jUeҘ"j9R Y 5IԊ?#)S,)`ZX-. v:XTk0BI>z R DŽV/Xي[Vn+voVhy5%.]aSvTqr+oËerTrgRK_PzCw__emv]$'ݯ~\_'U7o)8}_9o߾? e,E'wSn}U>L<xϼ_*=uӛRGM3ZoI?n6gϞiNg( ?VכZ'm.ַ?~u>~}Ƴ/mwnoʈ74o?sX/7V4ݚQťq3+NGb_FgbV@II)ƭHJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[}¸%}t O pSMZǭa|O2n咖[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%qO4=݊qŭ=nVK)ƭ Hn%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ>e*`*nŧ|VlV̬>n.I)ƭR4:IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[IJVĭ$n%q+[Mz7`~!.N_^>mN0S ]{o]aQ>>iC 3Jp04tõI1Z:tFaO!f7enJa1Aid3$)RKv2riNQlI,_E i,]N~rKNѪq.||ǁqx唲q4 &Xe0[Vq4w +\Zt(xbǦ> ]㬚b̆nts+u;]1[f]Yt3+6q>fps+uV;]1Jzt4fDWl| Mz.t72Q(tlJΈp +rJ}V(b&;]1JGBWO| >q~J&oz2r~#H$qG%M3A7Vk0teT,dd;낟Q`}\GvQ:- :] ]1\BW6O"?}ed;Z-Tڌ~'IW9㍡B ^V/"0:vZoz:ޠV/k(by#~Rd$k'?<<7p=ïߖ^1bsrj{NnW~ޓᘉEñ=1߬/?l/f;~hggU^7w{q_ ~c)޺ee5`)-X}oѶZ&*[~o'.\ =Sŭ[G(1B_)90<h{6wm\uvW |k~ԦZivrņ2ߵ'S[Y6~a02Ra(ӑmOj^NgEW|4b;]1JBWOWn6tpͅ?t(I ]=A?'uQlGuyJG~`V=b1̅V+Fkm$G"ai ^4vݻ@;tKwmmɒKԿO0uqږV' )::AR,  U ѪcmJ &][s~z5@cAY1lҭ$ld{Y/&eBwje)k嶜;IE-h2`%[d5 4=(hN1Q[DWh]e2ZxBN416,="zT(5>Iظyz+Zgy?=u* U>ک2ИéԎXE2]v Ms?X;r?RR'b)M~Һ5tp5m ]eNW] ]iumRWlHW.f:tQN]&]%Ds0٣/ę.Mg0]M#\%E[h:5DFbMM1'±+tZs%{das^z]bEt/-tk P6mnGWBW Et x[*=T+h.kM0ky~(%ttutA+mZDWXă*BUFYQ]] N%mʀ5k ]!\uQC{PNau ?ezm59mF4wKs~l_)7=aWǯu?7oox:YL0N7(!^]UzpF?#R^ J?!+ dX]@kFʘ*|FK ٜB1'Ҫ+;">Ǐ=fK~yz? zPpp-qIK+iE k[V=>X޺+ >o?>L^&՝Pz{~Zt^Z2’pg #Djz!T_0oO?,lײXuս_~^'b??$M B9ŐhsB!RrTPo23*Fw H%\oy1UL#4!ޜUqHN/2!D9is\DL=fEn(UNpǭT@9`*=2l]sيmd]=>Y[f43*YZM?;ף ee2ϑތחfu/U9/ȧݫt:)WM*GՔ/Y~WsϷ}c'Á+NqUЧEQOys2=NA8S@$c{.D4ļ 43Qsk?oσ;?;R@obp2XO4c&as bhQg$ P4HvL*g騔! 
gIhNt1*-@68;YW7+ uS_×vq3Yp8K-#"jE1XchK.;nފ/z>saf'h"0nr-H^b[]EvMIRԪQjٰI/8m>q1~yzvF>LQ*gTiWO|fQ`4stH $/h.nyĴ/Z4 $"9%#ZcgWϤrnfor0^&l!V'4Ϊdϧ*#Wør <_oWtދQ1I9fQoEc"G-ꔂ( u6hQ Ņՠ꼭v=M =mf"MsC9l`>Ri!H.)#D04 &h%SVY'LйG}c_}kwf!z;)^Os F GbJI_]MD]fiV $#`' 2B'<37z2[);4.Ͻ2/gG꫐_rwe{3j^E:# 4H44N }n}zPCXEZ8kXt4땩Ggl3ߔTs1 :\Wɾ9F%yss6ʿa?xMI){dFŎ);Ϸ9-lL(ϪY1`vѻ]S6wKrzPjumVC]FTR 43;c$:@'-Q]ny=!DABܡ"VzJw;oEf\ oGTIgOK.Y?Sh|; 2kDfFiGv:n?\LG'q6=roe 2^ | _oo@;&z;y0nEK!j+ymRk:RWml;u$ŕ/5ګ/]h"u)-:E{^_jJ= k O:Ԗ (F|9!ENkɹ[@#0@='.!Q9FO }\D 90j L(.IaMō%˩1lCCNo cM vXfFC繎i,H7RYyh}[=SOQ>9J )mߢêh" mыU&_tBNPv[.[ݙ?_ԦݐP}d˨pG5@[{*^xFvͱo شgy+hkhYB] 3^ok>BenXḿm)xڷ^5Tfr=nZQHn-Еv"@iy]]j|3** q9d2WC;~$Z/\ ȂG ΃(,C2S/Z '㓍7 ,ɳS)1[QA%B:]s3 LqxL@ &g2Qf$E!R)(SBmp!l=q֔l𹄫σsۉ& ֎?~Y|_o_o"P a5"c(.L& 9lI.ƂSU>-HZ&Wkx=EX,Tp|yיljo1KtbWMG=W&v6?P`ŽJB?fu"r cEmIr-PR]zq3 P2i#Q$[@G9/[4Iz݋ vaHW]+"ɫP^cR3G6;ֈp{^/kevc S::X73%"tJTAsC`VBAQ:r)6K}+g%;:y7{ñxmUCIH\b/@O] ߍ=l:R8dty "<8^hCSh^&S[{SA0A#R8:8K}1a)1pRxrGH(LS81œq5,@zÔ돨x|Oa<Aiq9@ڣj$2 n.rT#ݎ_H\8ѸAk!d#i4Ho9,D2n, SgNѵԆZn^NQ4\#HYBHF% )PP#C޶Mj1~QY{4sbaiQF!zf1F($6 wZP2*C1rM(XdKyxD ls·eP\1:.8cMb Z)R>4}dKGD 41YiPeH165I* Aq2ϸQ_=ioǒeO"#}bMȱ#}J|Hl9xHrD$:LOuQgWW%MFbҕ9bg(p`-i^A:ji|/n`bJZ"J<:̑/>_<~ؗ឵-l5u!#wzc/9(=w>)pOQ)D8_2;"clHйQFDf5@H/8(lMh(ʆZc\3@5Ļe*=\{WX]a*DkFS ~F9u@A,aF =$P<]w9*~ʖF01EgXF} Ƒ]&Ve59Og/] @,V6NwR/Lzf  ^2 +I^ޓUEvH&K`@.v\Ӵ[ l==kEK6 8ZsB!QŠ8E|4A YHhu8K! =A(b60KNxo%0L,%E;cf3:Our>5 `Gu'vOiud~)Gw6U Aǂ>NmWOw06Ds#c4*!EXN3%J:51(A;. s QY덇מ ǘT*G ;q  9$$V8m0RS-Ӏ FчtZ )W y*j<큆<7vQ TãCL%+9=HEbxRw|U#D$0"6E&lG>J8c+XvE=3AZncAK%;C]w{6hw]U#@h\IԠJi )d4&jռ?Sb7]C43)PE,TӚ@bAh.JaJAjc=$c zAZx7;grr$~XU "MEcH>AIyG`:V $db^(}{ً7[3ia*Rz4,jxtߌ^ߦ:IK%̀. D,zlp :+: ٧[Wz_|S9t&6_mЪ3my:Ŵ|S»>.xCeoGeMVUoꅪfe&#f6 %ALW̫")4w"I1i@u<_d0:?eHdz4 ȩ_uqb!aN(Esfͭ$Gdc g2]_wҺ#'D',5Lb[0*eˈQ(]Gq`N=sL(Qm[Ҋ Ȣ`;X+\$JKFұ3tv[w!exC:| ;s<۝f9M>RNv3j+ sm$53NՒV6mVئ'շ<0s>AmĄQ|&3{fe*Y@  AiP/5έB:ɇ%zyT/偪Io2pr$H6.bEeRieq"1V&qmJ#QHY@l<)Fc&yeĠchnDt^vN_@켊kO=m%fR+1t%ۃɭMS"&I` Ť5{uyz;xtE+vle-ryn{wBZ@^i}?LF-Ccn[Y_# ބ2d_>ÝQoZ[lzuӶqB|SɄ͎T*,mB/P쫦0 U2n]kԣ '}W*Ky]/al]  0؆hLsO1@Aє̍ty5r!%9eQ8B6>$ @0"p RG)K#bv`(V؄ 1Hь"2,Y=!"=A>*X{))@nНb΂wDðq]tNRC8|sKKz3M]+6C+xOo(+ jx0MEO=Oxta眃0@0/0J1gQ }q8jd<0dv召9_Y1A^D3V$<-<Sy,NFYLʂdӺ@4mQIPE'\PFS]A.XTlXj"eG7fԏdI96l# ?/S"I;m:xSGev plEW6ԭ߁H[̯@ 8{.ڠD+7LLD];`U {"- p8 =E@d;WnVh}bZWRhb|B7Ȟ ȲP"tLW^fymuҶ4q]<1IHJ>GN>(+n@f+WefY8%q"o> YVĽNeb~<[,Y@o޽u6Y5Ct Nc&ָj+jw/>G1ly>J+x(! .JŭfjdRUU}f&}q89Y$T<ُk3~[Ei~0yR&C4eo%Bݸ+`ɚ:s8l4u)a"EŃ\Eܟ+de )4\}B8'<^^E/,9 = j8ź0R…1'ȍ¬nx6(]~Zw7 B0c'`i {85a/?>1Q7CU޻?">;;>qmP23cnG$ z٫M{z9Na o{/|WjBld:$x\w[,>`ZɒLm,uET,JՀ28yNVdħ'=cjJ!>&)D=ZE+ p. kWmdeD/;.7y:Ph%%6ʖhg-K!dr(%rp2D1" B pR9AdzM@z`,Ao[ضU~EӫpX5$#TKc<-08T 2*^96q?ʹEPboC|DP]ަQDJd/ξjd652YχɣW7-ܗji zS_ͳ/-,{[ǛV[uPtcB~R&@YdDH,w>XK\U+Ip {][y-\\Us[$ hGzpU5ZjP;ւ:\U+W!כ~o_po %^|3x6e*q+GWP+gK/X}2qk|97)$͝rQw~eHW9hd ?x4~ }d+-q|qB+8"[8[?tV{+OPQ_}uu}k?g2}ĿEWn]{iXҁ:Yʄ%&Le&ᦠY:OO#w?vWH1=eh/·3d I?}gS$|+\ρ4W=)5p4S|VЧj傊w4œV4̗߮5[_1,ueУ 1R(z]vvg{^g{¢]uƖ{[=3O-ٲU"$ȠZy-B.%Hj>blHR+67#l)EM e6'@BRVe$&Ac@o,H?7̜{(+,Lo<|ɎjYxNW\]1la_[ófKd~5_pCK(InMJ`J*M:ٔa)IDV;VdvǢ"% NQ~f;"xؒ)@i&EA@0G'9O"%1LV%xڶ"x !!ّI8(c*H\SbĨv(\q9[gL Ok*vgKD-69OH K*,I/,,yR-/qWò/z_tz[|"~po;d2{QNN _% ` ujF7Ob:.ٱxyǑoDE ttSj_RTvlL(bW.~kkyKc(jYxp Q 4@Q2%Hcq&Ҧ̅_1v˘zpzvƫB>v/OnJj{noMQ1YooW.j۽":Ťq;z_ t-LJ?lG~y ~fgLC?d+9a\C@֍S&F%썿H5Jw IAdGdE +>&z3gD#fP|(Bakhɘ8}d\-Wʝo.;ߟy߭VЛ R0g6{g[b[4CTZc6 {{ {?ή_^xgk0Zq>7QH *<{yO3"v7@0jCʪC#)3)z9ocQ֗e:xW\ȸl<.]6~g:]:tTTbFjI9Ya_"^//ހNVЪX+)hU HvP QUAUƶ_Q4KX*6 IhH([P*&ePR!riPLvQN ETu\@/,*S"<d0Q0A嚙~QGI𱻞\}1ŹvŹ<W잯=hzw^Eʺ;9hu9ɠ\xS,X9! 
y.DU"ƕ&+w딙c7cuo oױ?^Ⱦ_j@CwB&ׇQHZN\M>> -bzKG9Bw!;If+(\|;  +Y1z d`qҖly'vN,LRHf~޳Z!'}sI/WWN#;:;rk N Ws W+k=w$Zs|ӫiiԾ ~cP?ɖ?nc˛ɏǒ. rU\Ý}0Zڏq.]M箽d3Dz)*o0L!h T4" &;5A@aPڶkk-)Dͥ2ΥR16 wr9iR/|9둱Wi'gl1 wwkZ񒽺?q{B/.]Mo8bK4Q!(RUǂa,@FXKB.䠨XtnxX\ŞOJ@ TM06 UlaBH\D1b73g=b˴ 1ͼc[ԦƨM#j v%OQ}$'  N.F > )D㖷EcpZ"+dfYEGȖ&eR*+ Ga""9)8kIX ɢ*gB af뤅́JZo%ƺu1"63gvu`\vTV^-.Bc\#.AIN|Ą(rp9a^ _= *S 8JE~]PwlZXӛ#׮kcN(n'ُ~ iv|nSIW̖:$;VY-N*on7799Husfm,zL&|'RVL  1ͱu[3\v\,cV:f =7g7riӫxouݵDhus7fX*[^٫y9]39EYX}7̎Ϝm>d> gmx󣫷_&>Qbȃ fέG QwWne᪫X˟~"SV_O&x3\Bc)W)$lZ]yoǒ*`=ralM8'}J|I.go<$5@rSU:Br5)t ,}Fcd)7.(o5 Xyn7j_er{ q@ SmFp4E4hB!rA`JG"\/.  E00"p(!gS,eiB[@% 2:a+URrlPgMyiJEnKz$;%se<m眃0la^:$a2d$Q)DŹc- l~{ѳU[<"IfqL8j+Œ3 :F S7n (bxc/y* `ŵaxȒza %>FDLI;ԂbY C{oW.1=ӗ̺ - ]D  6.>^jC'n~g߸T;fXtP'G/Bb*jAVyqNE{1~1~HXkY n1g3>%f'V{V6lN¡O˺O#ΉuO}.%o}ӖtS]E)Q;>]s* }f1nz-߲8iWJ}|wyPͶQ7>;˝*9c)T)%Q܍R'E9^WQU:4?Ei$'Oj?:1ofdwf&|&|f dvp![1ܬ37Z)Y'#|lj}Ҏty~|y}ZkėXl: ·p'<^G /.- s:{xdJ 3yea:'n2JO~7s7jH:0F btB<'hl m28|f ˏ-Ƅ/V > ,n<5CW ,)㋁Sb1ih4AIsϕ<"GHx ;d/r5wm_l//ߗie|p3I{oo;7Jo^ߨqoI,~RmӛD~7!ϷyN[fu2 ({2\߫_W4/r/LBٺ#ivEK LRӄlx [vmQA5z,eRG?%{djif HDa;Zwkv8ܖwQvmHɿS ym4IQyU& $cj38H*{+M+C^EHeZpOĨ "J)pZ;#V""8CaՌbAK V^Z.^E/]ɢ848k98;$ttw5E)Qߴ'ԏM-hR؃@Uy;_0-R븵!3tndYa +H-;H:H-;Lv"tH)^#K &rc+ǀsP]i9噎hU*:f[EĖPƽuH ÌМm n}K M>Fۻuz'`@a* < N8M&{8󰰌"+.;5;؊kk.䍋mq\)VTxGDJNcRRl LEYJbP=!H&>qrA%['챙u4(T`CH'3 <Y4Σ (2"~Dl,*y[ӗٚa&X$SeTS%RF J=#HV1<>X;E5Ҧ|pZC=[j+ pq5%)L$ pDuQ2eUN2 Fuɝ *סv0H1(n+_&{+`ob2A(l.r ʖW^ܙ4upAl[ei!/sP1~zIqh;XiL麣Rd+u})'bJɔmkj/Tz|V?*UooR>E!3:]OFgFA~F~Ye|U}i2;}Y6,E^596c_Q)bx?~XIٿC?[5 9M>À!Z=٠{pZ||[Y7'_6'|8)mG&i 2Ϥسu-}sk[w/@2"bcUygYnoc11 ~V4FHli=HU6gobYg~׶R5U WY(̸ ^ϲk\8 us nPL.ߛaXB)9GvMDr U+.ۅqy vڷGfiϺeT-œ[V+Q^U>$xqHAĀnvb\M~;A7,;uM~/}ܛ|N"%xhPHT1(A"N_$8MRЯƆa{_p2WJ^rϿh*츣?:HEfMiX&C"F1SOu5qY{2rJy#zGɱk/nU ) )`W Kβ'74e.A~{0JMN+?"P$un2z , @#2S}V#|djP\_d+R ^J|Vn,N?>;O?ӓ{\(kS@ Zz${qhtӥh{j;] rX6}[3@lh-k.T|P}4ͦm x@r%JO!sᓊ!"!H>>sa1Ca1ܢ8.Li@";kpA3(e1ު\9a9$$V8m0RS-@ FчpZ Suȹ[GmAUюzh}>++B[C1D%sc&9)w X"^"v9߄ =tS"aRGN Dg&HMt,h$N uőp,찊%%Vr4"JFc&X[0I*ރe$H c,jٶmR1W4er-eԀ7}(JUwB0b$ǂqt-")h\ZI) 48퉵őHĽi=RV,JV0Z%+!@ 60KT`5ĬQ[Ҙ 9a36^*m4UUHQ+6*2F}7MtP Ft@N RhA$$X="Zhy,`Mlu0>)7iaKfzi0VrC`IDiLX eiټ+n˭--#Pa s; s0G'u\3)͙ƳtB Ξ^LsѴ<kv,MŸ\f\|:ZYK0X`"k"QK̢ir4V"FMvwꄷXǷK|9w|_]KmY?[Ֆ V@۫0¤ݏ`N'T>jޱ{R'v*pyǪ}ޱCU]koHw+ĢZ4~hfmnn jdQ+JN=CR',tLLG&G\393=+ 7tRJh:]%Rth)]`Ido*e/t uJ( CH>YWXqJp ]%Jw]=∡o j0ƴ7tǺJh:]J"]IP7]\'kiQYi֋(cCjhz‘>+aq:b tu`;8SҏCB)4+<]QV*V7tNBtJ6S+BGt)!UB+x*Td'HW4{DWhDyo*e/t uJ(% X!#UKx_*}C@WO8UCW V}+@KӾ?]%l'IWB+x!^LovgAr(ݹHP)o[gY_O@+H(t?)~pU ,U윾7J>O&l0.m/9NB tHK7#R)*գo/9coZ>-C]rKW~:4SF7t$@&ݧE] %7qc;UuZq(ix@+2]sD]%\%BWbuJ(  `){CW n+@+:]% tr4]`RJh9:]%bp"]18]%? 
tPj>+.@Q|EJp% ]Z:OW %]=Ib+7T[aIgswehGTF"Duo^-u*Jհ7oy;gг=zĤb[+=$~O TrSy]pEX_U<=:/a$Owks7~,o~0kQue9/釥!ch((kx.FYYKHZ?8}9/lHviu}Ln][@[L(#]ǫ2%z}e= +]):<:Sl&2"XQ\鮇@mLh]io5c{%(lދb|S$Xpc8:A0 si-"'4'b+H$H5%Z_K1w=S܇+vrPt C!a*-{cO'y$4ViǬQFYJy>%&zqyvVmN_Tll|5F ͥn;h5~ulyft 3BX+kWdy~M`KXrMH.ZGu-6Q-;i?ZF>` F#Bz%Ɗjt\Z;/%S03*u@:lUAU ;-0䄥 ^*m4U"ƑrW^)A}mTei α RpAY,\`(aB ) $0$X=B# V+烲<̃+crUbұ}JSgExr]kr:{olvdz=id~GjÜ & -LqIG4 Jn(L8(|*>k{oƋ\<`= ܤ0Oܷͪ C>l0rˋUSt?MBSgNэ.`l w VU Ao~XǓ ]3j&u-+[&hd(wYUU+$cYYI&D0Ab1+I x% RذX.~ĨLr[\qQ#=ھA7'fMcXdWf2V5.*Fyd>) H儽rD?DsN+s(I(sd/ZyPoȉR4gKrYw 'tGc ,90rs8/~xDK!<_ K`BG`0MzlсW.C'="ak\Z2"n&x92J#"( - lH3h$-[2ݒ:hp~7GcgV&9\`$sfq!XB{_\kߦfr|};i\&D~?+3?<-Eyz"sNm8R,GiX@7OH/AniyZuH'H6LQLբ\R?@:fyr.ּJf2i,|bdBl B+ .TY$Ch$*Os1xP:US._<4紼E~oe2[%s(7p+ɛ· o y0/=Vw'm|Ș/<nIUoy%/j'wj=4W_=4GfP&^[- ;$}s|G"gW-m>_+2w !o%yLpÄGoq@:︧λ7{g7%EVR ϕlθ\Zm)"!|<@d*DwF buI`xGF םh,`1Dq`V'MҸ?^T#7#3G^2G}dvLj)i"OG :Ԃc!ַ, (jO#/Bb*j8F"Ow9*~vT> FH^o30T9%f:{1_S֏Pqd>f9+VCeOΉuO}ޛ:xߜ~aRxe)lͤm%g'X}UЇhþb)LoUISɝjߖAۑG_KZK}t*өRbv'RIR]B&gn^lS]@:9> Ovs/)oQA*1Idf/ޏ.Գ/G̪)!&b~~nt57ˉ./baQf͓Ůw55*Olȳ]|H̃◢ c'YR1/(Sj4[6\/j㫆 nɛ ١Vs0(Xe=LOZ8z $mD?];M˭f8>??l6H KlmC8oN$Hj- ( ' L(I8(trҴl/;UneBz n&T`cT^+mZhX  wmY4ƘTA@ݵ$73ZNkIʶn%"E6MZf_u~JZ( ׉Ky9}ɜo9n3 tt ^Ws4}8~+ O:DS*mɌ_]GtKn60~_A }|= uU@u/6-mY/`}eb 4 n4B{ԫmB (@ `&{$~vdg|ý'ZI/)S<'* 39ӟ:n-%fȖ]%YiDdVX>l)l>=("CtH)#K &rc􀟬Ajӆ4kjTjqF +7‭iU"bK(F[ :aFhNPƎ-κJQOryXsm~x 'Q/mw =lc0t 9;25׸sZJ?8Y1Jl XK[.הA6!ko{.Kuؾfd=Z`άW6YCQRfl)Bq0Q9KWR&o\66ݧ 4_z,haiI_` d` ;KTlag(҈ "_jR x.GeJVTxGDJNcRR(`LEi%1I$O^sP Va ឆRka])k") V;čtB8Ø#H)X;E5ҦZC=:W [C8_ݻ-6q.c>619ӳ_U,P:pqQ]3^阃z3Zɻ2‡kP+@$E|)O؊C9 ʦ=/[g+o-XD4$UtkaϷrik~a(ˉ_m<ۼi;8o4t2{u}$bJɔcحplŌ}*~QY"x%G[|@\Gst1tސuxSrV\~8tV0:6{_e?^s4P/PHuo=`7*agl'o[{ٽ'&xS4_\ W)&1Dgvc{ uU6]j*<@e xGGL(,I֓T߾)Mwփ!j軭15te _~s>2.닲^ϊO-=~w=O4+6)*1WA19m6 >~41,ɌQ f(Ӿ~@bȔ6OgN | V9,!У&?j0Jk֣zF5^$ְT{kUnǻ  |\<)`g'p8T&fm({oܛ|N"%xhPHT1(A"N_$8MR `}鍓ZP,=V%tQl`ؔ{-aB8d9F>6dp>^#uk#ڻGZw#sS>K*$t>.H;qAH#ɷ46*,ޑS'Iɍ%?#.!b9;rJb{D푟9:8%-XvspDc!;kp1f$%Q`cSЗ|9HHJD#b0A*x:(YTh`)zfDǂJb.ߡPnU)X;qiO L\Ed9Ӏa0Y!ґ7: n9z 6bf4r%pFsKR1e34H%xtMH:q4ܯ$ktXcI&>Uγ:+|҈|LλE /.c3#G0px@`%؈ b R,# T3@v"5z{ s=l/~Bf.efG ѩ}~7z놗`}@Fęm3d8:~=l,M'}tim)he*!fPPJRAuȵtyg,Dd Sxh=VE!5l biK4*A'nNjsRUVlD X%/ˈq_Of&8)e$"_jY"M6 o69w̱r2_< l7`>fbKTT''<e,!]Zn5%'1bePew3P9<eUT2YvwGd^r}r*iPO4 &k=7OBGo;,~Z3ߪINMzm7(Tn6^񲺔>>ޫԚB5wʇ- ^J˟pƩ^$A?9uIa 5 џ^rFtŀ͝V =: (V僵b[$.) wlJaJ#68R$#?g)Oc.*5X@ N8ea*YnSWǻ](fKyIHFr@.TNvJ݇law H_`iJL} q_v ~fYNeGVt[Ӱh8/^`0T~-d덕WfJɴφ+?\^|g?T y^Q$ZFfūP>#Oscۗ'Dsb 4e#y""[vqiؔTUtqw#ħrXOeX}k4=]]o\9r+ dYbe`_vg6A"vd+c+A{-%Vk}ǽdKӷk`:+HVEV1jY,Dtv"P䵿R]ʗ_䗏._=uo~O]VK֛1"^.nb]!a~U9in}_-Pg АiI`ZwIMkwƚ&zIxk B}8j27?wŽlU38|c h8;=9_~wd?  <}'CI12[*~KXᚬJHΩ$Le[ECm0ȧIJ<)Np6)QR*܋EKbek9'c[ԊKkH@JzH5d&"rm;-@|EcKC7>ybGk H !`AT<Ry UZ)pE 7g4UV"bER! .% n[6{k]fT-v ~Sg}u~{v{Ln 1"J\1EQc_>xo^Dz h<ߟwa2z2\Io9杊bAYS}Uݮ:ޣ]uCΑei0hJ!NHR+ĔgyP9kk6Zm I|lbSQ4i2*eޜGV[?іNe%i)rL3TjX+"bE";55nf6ܖ5 X=Ih5Uj2:ؓFh#L;`pH,=!C<ث ɎiA6-O/s(Qqww9獉: _R@vrvi'~X[dsݻ}&3fILU LtbLjJ.!@9cw;l[R2 PU9|iF%F~(6m8q82\,cW. 
s!O\\xcW/\&7qܣ+ؿ~~r*+r-j\u sVY`ћdGT=TRmF8$;l'Kp׎ ;8trݚ+^eHy_w޳mË}LݛtƩ{'I=:bL4qvrT/j'Ci%%4ߖ*ɵJ}ҼJ%F%7zmaRIUɋ/ k.iZ䝄ҟd?DQbBO~;]= pLW+t ya(q @W8ծC{G#+GCWcy骣$7 + 3"YFCWnpc+Eޫr-~򇫃Ux4tp=:Z}DW/(1 8:0`Pъ{(&ztXܚXx~x( f>;%|ڹ6rͱ1 nn~r*;|ڮ[]V h@bkqqvje6L;]_~㫒ZndR켠@8;9Ak܈؏Ec?;Z>vnz:Ov<&hu~,tцRdHW^DFT1vӕ3=L}tк}_nf`!w@>X"O~ctf44yFL\ӃQ\0AuRUcћ2zꡞ6+*[aukk6S6b펶\[ӺfݱX y|ޛ}Y2&Nk>}ѐW_W+gx{xoa#j Fu֏Eu:JV{Z͂$fDteUK -;ZeG%ѕFϓw?-9t(ݚJ[Uke,t>UG2 +g5#+<&p쪣('ztłkKAOsrYƷ'gz./׹sLLDګǵ]z|͏jwߪ1;y!R M"s.rw:-);)͛vNh\ 9]K=jem4O27jd11yifiNQx.4;Eb+~zcfdwt9@}aKch:`h:`t"Ѱufm`h^NDdďƴ~+E{Q%- ͎N.+)٭fUF٧ã؋*jPs{&:l[O}vx(˜]7"xyUA~xm;G2~QSwV±#/UDs7[8 !,z}لp뾛jyp *zJV.o}v%ĕ.J]lٚY] r*i;Zn4=#%^4rssޡ[6x~nVoZS{'YވV6C kxj-\." TlsU!äJUO*P4I5-TsnaKA(306kԚUb6Yo0eZIT-fʦi9Ũg_{{,Ԙ5|eʑZ64WFIH6\AdZBJMJƨo{ x!^0ւoYY.~kBm 6;WV%%E!%VL>Vfٞ77BUs;sT#mPiJrM Ek ^'4-HkڻQ NXbr Q}\uNQZ|: - hd7$X)+5ɫ$ɔpHr։˩xEL`՟3持L1@>$kj|ņH#*c&<e#XE;`Оbuv4jBIҗKIteJ`M:_&}W*7 XHrH-Fq݊U%UT޳Q~Ts 7}cGpĠ *QjqV'.+ꗒBn±D5mfP8M`uP6K#ic]!] Ds\lGET`2d#%J2(F~m*ngAE\1GRF;*] B!Yo͈Rl('/uVk(ĩѐ&Ga]8%+!q'P@o2NM=2ܳ5ѣb"`1 pB;e,ȈEbҷ'41%Xе?6yr=;^Ƞϔc+aP>^ݝ#.`Xf&HNO:*dvxE#&0?"bio&-%[v兜K/x_LO~ 4}L)[6SfP O7|PC.I>0ӦAf%LtqrGZdEbX-OHv9!/Rh{E8JiI/h2 2|Gd:>.=)dgD;O]]5ێ("7ĝ䵣mP8zL,tJԈYߙynd*`F8Mxec:Fa돛ǫ |}?=wy )+'Sk;=(#C=եM]5XJ(/N @oDeRሾ w`z„%JMG"%4vRe'C0b4&MSz|#; gF IԑCF)l:'#},ܡd@(ʀBm3zw7 GĢpeXνLg>;ZYdBЌۉP~J?X+3'4I=9 43+v hmR? ~A3{=H4Ғ=I%`-4a$t_ϧe%aF0ARD.&{D(|]l˴W=st&]@  p'na hA3B*#'M=6;S >3Hu\uq[dlT:4Eʚ w{ЛRv |ۓJ.~JF8RPR,hk~z Rژޕcٌz9)ѡ6 㕚@X'z xfzT%[7ʚɘlI.#:2GB11=~g#YAi\f'S,! H}@R> H}@R> H}@R> H}@R> H}@Rn)BpP 2і> H}@R> H}@R> H}@R> H}@R> H}@R> H}@k>bݎrFS*SR }@du\H}@R> H}@R> H}@R> H}@R> H}@R> H}@R%]C> vuԒ}@H}@R> H}@R> H}@R> H}@R> H}@R> H}@RcKj*W\?nN?uqowoէ˛X@t-ٖ vlKNflKPc%LYmKk-aQ?H Ǖ0y'5](oe1 3"+C>ؘ 3m|QTj;nL֜L~~uz0-95i} 30-j]ӢbzFf!\M͘6vr\JU巃ġg"^W{C j\ૻ}_nNPF9$wx>om6ĶKgQ -RN?EpCrifvB:S{d40k97+Mڏ /U'y5NW"BM3iD/B-U'ĴȍLZ9Dsp bz=p%*!7jW_+QIZU~;Oxf ajHK͓S~Y*l.UT\=w'n W7+C+ݺW2[ q"xe•ܶ+eLu%gM$_;XWEd•~ W{ D-q)9 q?5+Pu%rSnWPkM6(Vmz_OW7aopy,lq}e9ONmۛ.!+s]aC vrz2ÖSےr4̨F[~}u1~/6۟_O/^Iq0^NkގO[_ o3YlOc~Qًi6+/ xf!*?curWl㖻aBMCqާi oҎFƩ:XG#*-iGŽ&&67+D.Vp֚q%*ƯW)SC` Dn;{Em~5^T4՛*xx\}?eQXIΠN3Cf : 5cqqRO>ywlh O<9]G#j)шണYaG(^Gv l+ƋJoW+U)P,~g,NfuS0 1 rShfYj31 lt|fos Js; ѪWֺq%*VCgTY_ģjܥ >O-/dmebsȖ• DnJrW'cjr)iW\A-q%*(V+ 严fkhs+KNaNqB\7,)Vpls!W+U|Ks*Dc<+6֎+QZ%bTs?i"WVMc'􀌨4?!.~z!DJ&_;D%' qRα%\A0 DgP;Djl J D.7+ fPTjY^ DphgJ.|L)Վ+QyWUk䀖u]&]C7nOoLyћf+<^h^-FNQQ90qavw/}WfyR3e;Se%dg*;lrC7+c+Վ+QYOq&"CYSr rsVpL"W\W$cC`i3f LWy q+mY`쑙}`f'$X%ѧYԈo}I.Kc)QfW}UWĮ@`'vX*I\ Iy`oעHfp֗XpvLN ,<Jy}-*wpRL\%:Jjt,p^2) N-XD .0h\($^<"NyRX`:I9$>m80M8UлL"ϝ v=ӓݤ3 zL a* UWc+btp$'PN8"JaW .pc$-&WIJOp3cr.b˯Ұ_~aNQō^AYVM~f}W%>wy,"6cRF3&ыNH!u}W4m5p32pƈ 0ёUJð;D*B0R٣EkU`:GW9vWXiC`L3i׏?b 4ovydc1F13ɬ`)$9ܷ鳪yVӿ נ F.]PJ(>mMK{8̢je}"eNZX zƽOeWvhy.V80Pt^7bȬqJZdݜo\&7fyag=z <8ؼbIoƭeXl5gKqVi5`Eq]^IɝbP9iᚧa/y3?A_YTP1*[(^}f{&G&qwm\,5۳xؽ/Ws(!c޲L93X">qM Nwb7tChMȷOlmp=[E@K惄X^r,"1JY xFXEeIuABN!B0 B$㑅0b}07VED b FрG!eLD aclA5. /w#a 7.;=77Ca=33u2| ysI|ՂrҨfsm2\"CbA(|{aANJ7jNcF) 62 ;6`#*$;A Ut4l݂W ?Q\㟝;Zf u8xTUj!2pWdM܋#ܿ 9oԉ1t47'pp2.AjDL=8=+NX 0/5=LG eZA ~$HZFL&kKVHKDsc<0ŕ^Au_⟣q՞JHjpG B )IFpmF:$lV&H X@.(|8} L{bXXHYga^tf7g<6d6EaUwѓ,*e5+)eɚfnwAI{÷>trT:v.!~-5%#/&O`:P0SℤiDPp -V1hFQ(֢)brK%bdF/&me"tidlLWi ӌ}P5℅OSRFČ/VT,7W[~ȻW2巽n1k"kBcVxp(v̀ Q+<4tAZ= JlR!`V0/2 #a:0- 9Fَ69PP1uèO}by ܋ DpdPIdsF8xLZs (F0r!c20Cv4HJ@@>r8ASXg;N}~î 6 b qV OF%83ƈ]Р\ ԜpHt~0b\}YD #$$8pFBb%#1)@:,iCO0I̥ #bclGNEV!:Ӓ}q6'\ܤ`bJFDLI;Ԃc!P7f!=?/uZه&C"Cۄډ5uv`;_jēׅYac_Exa|4Oct)! 
'GHH\z-81:E]iz6}OSb}r x~v{]wךY}؋V+s"V7oH8*-t'mEhuGeO18ozb.Mrl/!qaX1}eGTDNŷePQ>G:(tX?)Ticʔ5twziĔ'Ez_%GU;i]~%%1|cЮ3%mg֟m >/ՠ^v}w3(V9LXso5"&ۧpz"bƣqT?}keR\{>9 ,[p_]A }N89oE/t"W }?XTB|Z~F#7K"]nUw< nA1:'Xsxvk6i)V^%V/>&xW)|cw{)yaOU.o|YAI |$Ag{#"[6nU~֜ ɚS߿]~OҌokb]ί vJ4^2 o#"OŇ]o᝘K<݋l_ݔ1Lw]bÍ)fuXO Q f:\;[\,t=(w0$O,R kB` KmW-KA0kL̬DUk3wue;Na9G; Dd97rR߹T6Y,tz|ݳ6ͻ++u&F),Rx/;*0fpT" y csػƨ "J)XZ;#V"",CaՌbAK{F@/iWYFnL7?5 t ƒǪC^L7/Y(+WO+i7Jp6BHpw"\"Ҝȶ6wbqW9$ܡ7:IDLPx-&#s۩~1 Qg>/l1/S."@"|򛝝:jU ǫOWke:« F+LIj*c;FvUJUusbk ^j"r>$ېN{b{CdK5w̝P&ؘ,{dFREJ* %&@ m-\4qJ+ܓRI($ Ys,PbvlJyw㚓I0V$RhIN"ˇ(~D Jw$}IJǤh,D;epIBg1'Lpa&yLKAextND@<Ĭjv$kN_T kb{BRIQXZ-sT JܣLeYrR=c F@#uᣌN XTAO-m݌Y꩙#FRF2Y)=hJYe$&&= ^q!ʊ3MrOOBӫ)ދM0fs@`GT`fǹ %HTSRz]<"WJ;{FjrXM?sz<d4WBɵ%t489@p ZP Siaj;2J4<@"@>Ym:i:DFURprҸ޶''%؄% b=9o>-z7WyI#6R-Gx~ ?PnPb8#~TG.>en5{ʉ?n_1]< h;:[i.2-deU C)lI2#^w"^-JQyj-ʇ&Fd.Nnߋq sƅS7_OoƓw˛%NNM4?V8Jui5FxN[uqX@l;6^mE}4k C&kE҇5ﴬ迾G m )I36b-p9Nf=ws}wA;þAl+.UYeNx[,2m?bٺlI_Gw''O:A[]jHTҳqI//4==QyJgDf paثmnÞ{\=!iE 5u𤡈2:#2/_%x6 G@jŽ;Fd)@J Ib&(c"MKYeҏ'=9qFj3ϧZ9ꔃk4qG)u!>T>|]{}V.HO₤V< lT;|00J+/3H Z'rJRwJ3g3gSyZG & HFRHt9 ANV8xJ:**+Q'V>$փ4rBtii:9ehOp:`~vopzդÏ~SD4re:tLE\YSỊ6M;wa*͍AeLȁA\6-,ڔD'@ZȂYޛ63m,w^EAR0xWx!MJzFUUS%)}_58ǣ!ܲ,1B)'|1f y:\r.jb_δ қ&:j(w! Q /IKsɖZ5iҞxY!%ON*8V{fyfm p? q%8b^bFhrf%MZ%R"<|Of]l@L t+Nh<0'ҶubQgg%VgZmD]yy*zS d e X>Nj ӒI.|T-7VDAy!&8$or$" @g9*&ۭV 悀AL,k 6[&lQȁ{G? K?j 0 =j;_iϋtZ]a' e  m=y"CK3JLFc"WI*+PӤK_nxQ>}4&/7_/3ME4\k|oDoz9 W=7ZvodzPof5JBLMR?NTkYՠI^4]śEItZk8ttwnhtI^NOCtm{âLNo껛,_..φ :\_{fѴ|O%G*I7(9pmYQj|9Yо<^Kl?*Mbx(QsofDMa\Vk8E6{$ AC^\P\DhI2't9[` c1< 06jVJk+J+%gES'|H;aR_/&}68 ^U FD^_7]~W6O-K_>i(OGtWi`}<K}ҬwhRWvR>ItOץ׏nA?1ЊZWԺ~ҺjZs4=́2C?uԲ՜Zvyps{=OxOW| ͏G8ʕQ@)০"L=起$/Rlf?O&j珠wknnDIbmH rG;WL8 sDR B*IKVNuKѥoyӟPY?*\vYj|p99; mgǫ H"lnES;CQnF6 B Ul*f$ Y&&sJ`pKDB/V_Mmq(ye5|M-t4|Nh{SifZS5:NHe2Ȕ#a/Kq˔ &6xNs)UC*J D* }6HR#}`}cHW,AlܱбHqA3^]fE1sj(b-$Sۍ`q*bLF ơ4tBXsnA+PXvw[9ڥ $_wMGƅ%u8xSxd`2ѣ&M:D(?3`zU]l) >,Eu95έ+φ{>{I(ÙIx`L Nѷms%epDQ]WrVHQ2#!#'! 
X&^83ibY;\v(Ye>km8GY*UCt+ٻJE`JdSȼTtgPYHXQ )k?H!2dUmB}_I=s", iқ L9XBӪQqO$bMh 6v[ٳG{cP.pGkrWP-s"XcBM2Dq@Y8$&Ol?Xb4 g#a\9s+3UߒmxRgVI)Rʇi9E]ߖ=]7iy]\˕Zpnセo$sGxJ_O$ST1;n 5pkW]hkqiA9V"{<oz93jWW kr0ؼ&B}7kh*c?dw;aom>ZA%}f$]7mi){0(~[}S(k%i߮3RdRzz]]L@{]&7{oSkMF8h7M Q ߯Fo x\]4B8FIͽɊqy݇SǜYv|PP,iP*ANF4^"JOQArʓJ+ˆ-)r y 4OgX=JR/O+th\1Pϐ\8"MkyZzd)aeQ3։ e]vg͒սe 'Csw^'~HU-uz]mλRro[:F TF Ԉ1P#j@Jv@Su;n퀺Pv@u;n퀺pԶ+b*pQdO`re^\0XQQ4 WF8o4h‰iOUj%?rU`UGaJĤAB*>jAŀ ӣ4!C-GfL{cW:J&fGZ*D Yg:KYq\0-&ΖZ>&:`e٥]svK 1fZVJTJS$0 Xule(h)Oj@e2?K?P@@%A8Typ@aE-&sfMR?ٗ*PlXAʲe$5|x)YqNλ53oҮv[qE-*Aɬ .%!@ECDB|BSoJ,UF$RƄ)C`H,LJUR p8W)W Ma,T ͯ33..vEz/-Gm_4hh44w$6i*-ؔK*+͐y9s+$mQ-j|ƞ pg6ҠW"hfN Ąª1Ц0bg3b831͎]QFm[Q2ص/~~}=b̓ǒj3-VJEoqXY\EJ,ZGZ8KiI-{ =VX&g-ppy}sqQ"ߞ1p!c#HsjVL_V`WYiHM6: Zr 1"$2)"T$ FJeO|{Go%LJqy͹Ry`T?B@hbf_#.[}szm@<Bs[ C MRQC).pS*_E5,﬊oe.rDJp#3)7$(LzDJ}(&f ~gd2O=[wެ~} {ަXDv GFطNYƍY#FcyQD X0cx#w5$O [7^; LZ.J3`',c^ZA'!8s=2IK ܗ=|/%E;M:@>$J")!Т#@= Udk˴ٍ҄p=FcZr7;oZ85qe->7wvhJ@_/XNH`0nO2Cj查:,*n(HuTڈu:Ώq TfQSL$Ob5H \(Ӊ~,yuYٚm_oi4W^D=%bbjb`{6| ȨX Ezii 8eTzY̛\ތ9@:DBIu(5LH?)LSV.U-+Q{ }}rMIrr޾r9R\+Z;85Ssb_H{u͢q ޶Yo~:Ė \S۾uIw7w 2ߢ{-χZ1o3J@=oxpGF MSs5UQhߵyM|:y"ܪm~ ۜ)KdqaEۊ '$xYd*5>md 9,uy䖺vgQVIs'[ޟmV⢤LNP!H+crJzaeh82Ǩu&qy./1r 3!χ*)Dj@4 LaEjB:&h`fb Zs&Q99S1;OR%'12"a9wWz"-<]Ob?_\`͠7=4L;exhKnd?"h J1S$9,iOzP6T)<"T酞< yTF@T Bͣ*9MyA ‚"Ot(GY&c p9޼k<$AjT} BJ;E.Iܣa/GHLUic->=:-(A2$WҖop⳽R  l ;{0T"BHT 5B|dɄ|;X"cXI.e <ΥE W,d!IJP@RRh[%QM L=elqŧ)qcpYqGzWt2—ݚ5vl .A>",>qȟ]BXDkq:s26.OSFݽ`n=Ry'铆!&w=?e(o7vşݶZ6AlGHu!©pʁߏ"6.= pH39N"8j yy ?{z%#_&.1pOWnkxgО񟓿i}Hdv~r9ӿ/ Oxӧpu8uS.67o;89[^G@/ ΦYu6Re?0ARPTF /rl&Y >C}`F9tJRT;sF/>.l:1On]4w}Nux;h0ڷ 3UwWV׺:2A"֤95r RĐ ZA+EAK}im_t5gb?a29b`&`F,F ,Ie$}XXB7%ֹ e2H#.E@MM uL*&!u͗9Zc9,Ԧ=kA&PӒҔƿ]Wr+n)"YN6юixEg2Uy%,$A sd(o$Fb,gD2TNˎbzӁz>ym^P +P1ZBX 'c*dgL<9@ Шqdž mcjO'=Y<ޫĈX`A2C.ym0:$ucmBl)u u+HlV9>yLZ D܃#8nM(/}ZHyj;3JRU>OPzHPjO84:5C0CMJhԆ5ѶQI6!CV\wev4.H#mSI^ZkIͧ 66r2ePcoX؛SMv|Mw}1 Kix8m'#a?Q}=?T Eؠ1{utu׸-(S)VWUrW_~_Ҽu"%svuk&'_oyz.n~2=uyӛt6HwυuǦGɬ<#5x7+ T-`ko/ roƓO7' kM׊ ŷk>Yuyso~:cmr;$:L>z?dV⹸{lCsv;H3F,22e[fQLgIOZ(F"U5WFoFVzؘ5*OVԅ/4 g.ӸO|AlUPMK׼kb;N_%88jwy]7by^z@(->eze`eeIzݮasnxRnU B!XK|ͽ 89'M ަBǟv[|.EZ,Y("/%r<vn> {/M:$ }B/|3^@rT`^cgN̆NzO࿾ER>huKX5 &Irk^m̷mCZ)L@S I/75F_LCQ̕N&ӫOOק?*x~EQi֚ŧs2U[\$KEΚEpe񿚘Qt<4}@YrH zz&s$t~fHkW;anۧs˵"E wCݭ]tvqr:^`1Q4C6x*zUg~W\ѵͨUqcuEتtrIzyZ?sW€|ɕ?PFOY\LμN ńRPbc(EbXbC <)Cr*X do E:tAc@8 (;QK(gtY`*XhrL&y̠"`KzPmȹF]x =cVdt8zId9e9v\3 p:Nw7! )g—`b҆eV6Ǭ1IPVrdBD.LVͣ32nzݳn$S!1'I7مBK.0"3\"E`;oenIr5DPS,C1!⣄''rI9 8O c=i Y3Wm-7-;݋^h 8/w*RN@5&ju(ݐJ/Heo} ǥ \ޔ%酅伖@ əQxcVĪLiKq2=Krg ҷ/A۷g܁sF:u}8ӋWt>= :a2aP.|9$,M̻0/In(.Um٣9N7eAܬtz7K\FկEz7{S|_.@ $B̘2pcr J8HD#[83>Ԟ[ z>SN,pv jhFӴ|&1\n;u:'ȈV(1[ Y,&yOCPL삏kg=砧?eʬaʴ`~ LØ0 ع C A,A(z@uk[{FZkrJ](=όq,]{&i`:Y4Lʜ=bВNu}h$!*'IDN22f"\{u}9w㪢C:ѡ4;ݘGl<Y*w4,2y%oQGBi9%k-6%wJ1`#IpRUAr;1H g/9 T$Mz^E- dm ^j!ݖr,;ЄK1r{4&imD&%m бuF=j(¨t/AA%Q J "hJ\NE"cM;"3"Х\~)?W1!71Iv,e 4J$̑8! 
g}b9Ioc)5gHr#da )I 4Cj2SYG'WeD;/nL !4&GJ&$,굆$-./II֝Gh+E:c-+ } I:VtЙͬ_.fCk]A !t~KM]w#-'4θ}FBxpT&FA?omTDC6*Z.mϽ݅6 -cO/R*&g>UeUޥquNו15(EUmMޮ/껦ߏ';`W[ℷˏM^A%vv- F`v@ZBZ%Z#zndy3zސrT5l@t/ڢ I1X5NK#%Mp#kk.*#dFHdk-4 4>`Ɨ[֗?~̔%WaԘK^M?%+22>.:/<׭ažC3'KAh|>*8MQ%t@+$vaHYN}\cbD2<46jM/(A(ˌ@S Cw9wI'N'*w{vlT 7NI77jF?۟;p&!M0BѩR,) '%ؒ,m9آᝣ/Av4 AGߝF0;]'~F, 4,\Y7,Tr64 YZ)ZB0³QW\)**wuU\w!ꙫ+ &A+F.?0zF]=5 `PWN=Wܢ~F+6ꪐsQWZP}WWJu+!+?LhOG7 Ge*)$^RQ KK^w5ʨ8ڕzOCaE=P}oGkg&oGڦS Y8+R%>l"rށVzjLFp`c!׵o3(bj~L>.ռ"N6͹^Ѣ1߸}wK0qOi;z~H[3fY>L.Ӓk tlD~"lkm#G_Řvpc@egw-3A@/~$OWlIlKm9խ"YUUEC ֺVGC@qjP!~ kv'T|j׺I8mS}dDBNTם~(8i>E[N<$M7yD#;˯y2ij|y|gB$Pzȥ|oVe_z%F /7r})_+#hgHZ]sg%&'uX<UͺF.zr6mdylrӾȘ5ymgcbם%vmJXb~ 6X [8Igy+ Pɧ?S՗8hkE:k vIMF;©'GM,}n=k]i>, '-hME{4`VJ 0Ք]f~=}v6گ7my-LO15(h1oT*,0Re/8S9GyU +@ "b ҡrFr&PURCg}nqwud 6EohcWsYyA `_Lx#7bvڸաJ>&? & ⧗'rp.9sNNix{ʱt)!.*B[[6}Š{( Zi$ Q">fr\lL Qht,Mk]C< ܈BQbP֣]N4Eh!.dXl/ű^4zk6; Oz ~ږ'5|d `wAa^nIsA6`yC6*8b.J*J,Z;ftRjΪ~J5E0 KY%jK0֌]3UӅqƮ0 ݨ U^Q6lxCbN?OiqȻ~7ttklDU1VErXؼbv lj`/cb3Z:xk3d :EŒ֛RX#Fv=Rgыn!\KvՋf`^}o4XBLdIq\lUj՞ՎJ1ER4Ћ]X#Ϛ~#z_1K(Aۃ`g ֎-Igǥh41?y=fb,A+)F!fSbwqC;xjC8n=U3FVŔX[qb?,Z*h[KL Qe24zVX/gM1g )Ȯ(f-ݰаw0rxM(Ϙ! 6J-Ef~eSvmt&m$[ rR6G>T\%*"+4Hو`(j #-,d !*U4nV,/_um{xweU iX{A_l "H'v}J^30nߌqYBGj[XhF,HH$oa1%&& b`T-(t/\Goǭ驶gj|lR`R=;lu2&buBKFAVvq,_A^9Wv^!9&g,FleAy՗OwekMCtu&eC# >i~%iመQ~*C)a%e祹Aο5h 4809taDÕ jʡuC y.&^-17e&B؟ĝh.܀EpB%C!;V6U.Y/6LŊU+kc8ƕ6=hdX,N14qĒ2=)lxo>rOIcl()\j!hRPJ %B\-F#KdtM쩣<Γՙ (tQ^4b "2iׄ*  pcZrϲ57Sq֑WvYPJu#LNLq0d/q1Ⱦ~#ksόgriTzxrQg3,x;'g~<oKu:٤rS֝3`.k܎R|/./|VR%kI6j}1ri F*bWRP#U&d "hLL\TE̾Wmu"iE!U?lUd#g;XbCcll{T+*=>2?:T7Kb!õޑ;hm& c1韏'6'WxnoDT7_jt9\Sи2(3|GVYGmΤ;LxƔ :OmǞ1 "@;oL[Yy +}b? ԝ@QӞ]`? &xRjMp;{f܈ 0ёUJ0E"8cʧ \P)&Ҩ:3~w3~H OSM$Xt +XR7}(ׂ3^xSͥ~8! fa)rJp\=|~'ͪlœ-׬]gYuvQ(+n@?+SfLӯoA|ӐgEt6db=˒Xdwc`y B5,![<􀤩rOT5 jؖx(%j^0"rX#`% }d]&o ݃{݃s!_Lyj[H}[U\7{H㥐LbE<6Bt+[q+B ǷQx3߰H[c+]u/|]IM nmlMq]pMwFHg>0O-WEWwdܲk}t[IkO^>0Kfzז=.*0-ŤȚ"mm5;NQnb@v?4P_s rk-;m Ic$IMqjpkx);:u08H*{#M-JMDʒh vwBQE1*6Sp ޚs>/?YTg-!g EOq9wK~4my7ھ"EhP9#78wT0Ŗ#R*2;$r8 I✴Z0QLFHZ鄒;/ LD+T j4!13ˌ3LAعW%ڢ8't|Qlj>xD#_K+֫>}[0NN޵P EqAhzkEy䠾{G$ Lr)b48PEt4NN&06m`ԊQ6R9bQ Q٪EC%DhAE!hQ\1 A*!lhڰ5ΞzV՟@];5c{]kW`G#QhڥT_I!8L IB@ %$(UeO,R"E@A8y%"N"f]xQ`jj٠M;?i[7A+T#z=A3ÂFp#"Т PDa;шhd&6Qz|;gN{'1"0[AZ)i[ ͸rRɸQ鞕/<"'!OamQDmʷO  5Xa)0.k͹kA +P8"4+%ԣ9X܍ڢIȡt^bt>M 3ӪHMB0$/,xsH:!K6ydBS'ԓ|"9y_&MdP+R3EyVbw1٥oȷ;-=T~)k[ZyK-xcH U c@t@HmOjRz\TknwN݆ˤS -rO-| oCԨ" KaCNljxɅk/ۦp Jؑ'Y/Lc04\khy6,k7i=ҢQ`HkmRN7/'a[!rFn `hn)1H)h;u~Y{߈4{ `2ۈ#@ j)4\ap!PvU6q90`ֺ"8/KTo/7M?yv)/.c3#B0A( z +Q j Z,# ԧ)L(oޞi4EvKǫ[5lDt|PQ 2yvpW·${Vl0Te'ˍ'`d [T.1EhM ] h^UU]h3,yP/?٫=R8qDvaNTMOĭ@m*JP;Gdaј.ab\%. 4WRRE\\1W]-\AR\@sd WB*`i_D9/d^v%SDb'o0@i}u6ʡ&=zHP%RՇ?Ԃ#20,,GcN v3TʹVcraLјmcvKI:sls 5ΞÝcvwc5WwV1WwRv+vs:suc%Vu00;s%XU░J3W/\"䭲$<ڜIsu7ƸE^h0Sr=koG0CrD~0p<"nK ZY"$e[zf(R5AdӬ鮪M /r :ۚ.&Fd+v+0 chYOF:)-(n7ZD[ .`=VsdžLmzu; D=nVy?.`o)j3rFk#vE }w |׫{Rܟr(uO =Ƿ2AsNqמ"*HvXtDK-RDn&@<o A1'YЃ \`^ؤ@+N/Jc 5w߬*fB m-D.KRYZ8%;0h -&$yN%we9Iٍ4RM3'mc[n']ѡbyLpaq#m:) *N w> b|*_-ҺsW4̃bӌZ5(2GeVfK;;Kz,q1 8ʏ5A8ƚ[Ĭ/^tF%d|dJaJIB`K:y$:d @_!"_ h\G1Y 9,8:Huٺ5M|d~Q%Μ2tt+k N B S%GW~WJu?|Bv:Ebɿ? Y':FyqnWNOY^.]^9M7{Ww%S3}EbI"֐$ 8?W06MYzo#-{A j#o&57Uy[{hR}V;_ù\54m#+WǬt '˓폪bUN9gOVQu7o4Ћܼůs"4i[_9ߤ'$!szk2wLlgy|ϛ/L+\ |J'o/n )xN[uo6hV[7~ׁܯ_߃'Rm M׆ [ĵ[?Og ?f6<7K&ec?d6v+og'I}HeW|{_]2'}l2$-e p-j.]d>yz^.[GcmլtϳDWZ5 fqKr/]-8S@58ɸBaE19+~WlPnܐ޿rc7pVwh.'˜gi{v7.ʙ ̤\GORVjΪz_N("hʽ|N6'|Qw_{w[?-U{G1y' V{ ,]FgDJ`Lx8ΚeamDh#2T@DMJ!! 
(c" 0d>_pFf3zTh+i[/ĝ.UvUc==$ a䂰r" V)[χ,ʍ#d/1rW-{L@,* 6Wӿ\μ 0!b(c(Šb(w1囈GuD0:弊do b =Ne**OIG{QK(gYdVZL X/*cl "~w;KΠen/$3Lq.[A_3F+':q$1Hn8ԏprfFh yh!dk )hS%FDdʼn,k?ƍ4y40HFwGMmd:Yzʡr1JH+/ä]IIS,5ê4L\i[qIAW.XiG:=QyV=V=D 9DU?M T++sIp`9OZF=Z`/Z IꖄK-gշxMCueP>FTLi? 4U^E% с ^z^|e5s:))z9z9PM ^402O$Wb1eJH}]QXNdeץ1'5UddϕB.2KI!x ^c;7tohdנXVpaG@lZ ZU/׻G)l^S*a%=fnOA/W5LZ%m^Ԗ먏M%= nA\tn/0htetՌs<f́2Ӱ5k4UKǃݷ oޖ9[?Pych~>)m4_=8^4a왯ة\z1ZҘi_.j5>oC1ѷ-6!\G,}z"NGK,u=pK]R Ɯ]6fr݇tG\a+u-/my ~ 3!FG02HA@RWHbL$R 6hxXJQK"Ya&F2Y/0 o(AI.1 #=PY+&hwȠ8ľcdzC`$܀$}ەWÒ4n7ZvЈn`K>tCUs=YLZ8(Hd#3F(lMH9b_ZG->3_ޥQtQ|mɢ@BT&2 ,<7>/ t'H1ΣEuyK1Kpު(SVH#tEʙI0\9 Y1! JIY(F>r}o\?3MkfZtGCQ! on#9ɞ>xav};8̄L14\,'#DEv$pD[ .Q?\?TPotT?TP87Fy?ytDL~ b4Sۼ5`<+!-5sy Srfj~yYnAI^/&%T[Wgi;~y|nnX"ۇ#G}#֣%'#ph[֝Cƻ&.q}"omuӢ'w͓͡aQs|VZwpLae%D<&mc&>$7 ߪyTϻ INWwmWEv\B3.V m~ TɻgyK}LO<l9ig*=]#]>ޏp| 1w6-`oo9%rT<d}1MWgiZs;4@=< r1Jvv;Qjn֩" tTn^7hL:>ӧ_6df4Yx0^2 }x4Fc=6ʭO<=;lk}sVhPVxf)2H} V)-})or@Bu3E t]Õ7 x]?g/7rv;KpOp:sg:!3$9ކQ" |V-quu9j#ko5|=ꅃUCΉsJu{1C2 WB!] {gjqʍP){H!bR}Ϊ LV vecr3%Gہ'G%G1(cf#c%$pY{sLk4' /ABH4r+7`>;8򐸎>rd |H N!2g@Z0LFU9&_Armm'gj{q_1Դ$RY`[ n赓t~/qvvNC*C>ժ's/fz"w- xPx!mw17ͼs34i!K?,lwV%pқ4J;=֚3܀­ xzO&YoJ{dGnxY8v,񄪬MS>rD}6t+KC*5M)}B4%,& n}09 Wwަzޓ:?fz,v]!!}!́BKZax!Hu5Hr`!/]; R˅@'}fiseQ휩tjCSR\G YXk۬2 ]A[u)1'U&8SʢMl1ׄT dN)2:)MГ/If<=8+𼾜y\#7|v$M,O_(_TmSGt2Q~%)C:@fu,**,!e# %SBfkR.|x:|4q-qmNmJ5*Sx0 9!YE8(2BTNl@1- [l ;hlgz٢lEL`K[z@U1>tQ8 "KY(/ ˊ߃R%Ә՛sG>#vª,+B.^DTtKiu29 CB2j_qaq9hB*Db Cv1IcQy}Ų[:"Hh8j7M3q7j&nC6t݌d)Zy[ٹE1;2c3m `P{ v3]ݾ=9k"f|`>K@Υ$N.I*Bd ṃy,ز1XG3 :o|IЛI]Xܻ/K8'Z(HMTlℕrYl2'>gkʼnQћj3浭fV_R/MMGQ*#}`.߯~>|26z|I;ܟIIϔ#F^2ԭ!G+qG?__ݬ>G{Yһ%|C^>5|n Fc^:o{qYbO-c/nޟ_.nu?3_Nf`21Q݌}Ѩ%G.E_}b<Q 2kkH:/yE8{}Y:}v_S- WDCs>B4N5[W}myGEZ~j`\C%=XMQ GVN,B6K$l1K Mʊݕû`EnRg՗H||9ufr84? g ~67pyrCXe֫mo!qq1n5,GmCܷfv ioHΤ]{4lowV0{(ߑo4-?OTT} pϔ7B"Q"9"T-E聁F̠(BaG9:`ud8$yvL7$j]0--Efۋ?>n/!eEĄb(.9LVݲ lيMѮ RG3mbslwGӿePaz6jgG;(Nx́idouZ5#b7__qS-c]QYu"uh"z=9/#<ӥ|y19,r i!mh yo0R|qby K !ս}ZPv2wkAxS8ULFV"Nqt;Q@@(8l1qgWYg+̒1PI>%A\X6)7fӂO`}ɹoy~J1voZSa~|u[hu9ɠ\x*YLU"t QlH$ӐYoY_ #n^1MM4{8nbŮ(#c? )NxK缵]Y]]$jFR Ptz'!c"Cs1A|:1FO _ & N,, Bj'Vߍݹ.~~n"/ύ|K[o\tt tjo舗E^6f&v7@IdC^vY2T(\tp]QZ*<4zY[U}fHPA3:&+ ? %Lm@$ Y% m6}q/b3ۼ&=C"Kחkq|wp*bށI}UR|ؽS_YAԹSK\ӅIyrkGԅ Cl+2 1&*E`I:T<+1*a%pN9Zq=>4a}< 7{Bwz1ufz6f|&6b.$hEf!KJRJ FHXeV7.(;s^ @w%dʖ1JH٨JLY/ ?M s'k_bU'6]SqY(4`nY݌|4¤8gA@= )ClW]q 4%*P֚0}t{Q)2Q&ɐ rBEJ(e}VΦ]u҇?Cn˲c*IH&]Ws (!7tTErH(IX*վeh3q (tTYim%A;=lvLw Z(3V@QyH DRFxWI5Ygtd=t1av&\t&b9c厄\bѹ[Le5I AnD@rv ºetTb."c7q.Jy,^v]c= qr:QvgK x0oE ocPiX鳪 8EJ4nZ49Bfd(ٰIFbȖ|HE'9Ʒ~'La/6Wx48~kO47x#nA*Xc("cvP&\|dQ.?Pyi6}kBY#HWHHU~&-fNlI+4XN5ق?,Tnn\G󷕖__CJuGL">E@PQL%((Z*]_܇_<L;v}.luwMNcF> <\ٗQ̪/Tp֫֞JuWvYiuRF9L/꼃}4)\}YGoJ\A|v/ȳGW%˓Fe?Әoi͸PXGhX`+D4UgE4'?J nh^aDCuQ;%wUvdKBҮ*;RwUTC5+ÀrW,!{2TKk=vwUkpW|CNӣ椭^F=Gz͜:bD۰u4`ؘCwt6rbcܧ1;`+%7+-P>㱍E7^W+Z E tez~PӯonZ}71͕Ry=,O<dGь'V<5k4;VnVʇݵ{Dz Sq5J89y#zC_)#k]+^-6zmɒ,$eRV #̃@2.d(= #/;X?\I;'8׊6~9LB\Q' %ٖ`Xn4"8A%cn ZzvJLYMjevM8|@i3䂑'$V TeaIb1"ϝ xZp-EJ\UYҶ`,Z+-?Z ~"ǫo.QmҥR)( SRL"K }86ɪ>-t1i6fƢIfEGrl DUJ =a F"[Wb^I'FKVf?Y `N,a2$ixnC߬倵I%!*0Bt]Cֆy٪4T5%b<1_ KUkΥnDA#^x1ѬڊȶH9yIIHW"HP%dB dfNU ' ouqI%zh~]U 1Wmҹz ֩ T2|,R $J!wu(QUuPM598s#Ԭmj"7']Q𭻎NqM=\cC/&t΄քUTRJ$ڨf)$DHT֍KA_x_%(,ƓڮFR-+!5+Uȼ96TB1K(#(B͵ RAvTuD #g%|YUT4JltA)ׄP[ <4Lcw*:5A^89MJʆU L7*|o%XZ,ny΄±e6֬l@</*Tk%X^baM U#\ "U+wIQl`_&t>4Cք]0/jQ@FnEo{jPv%4_t#fjw^KCp1Ѧ!lG->h d(!2QE§iDUuAΦo-JУmQ1u(.! 
D5tTjMI!0P9|P ʣ\7L6ڣ~A!\=7+ 轩jyPR( HqGa zgW؎"qݫV](Jx <}kׅbKRW eFjh8hY6,@vHEb$ۃB6rZ8C hB}foA}{ڏwQM%1)!9Y|1fHTNeh / 387ޤ[C/*o#?8;Cu8f9j6sLP-$<|QwC]:*KMg%sJ@IWosVw e`:#'.UaBąQJC$JE[3`)yRh^bn_1%kڈVH%Q۠p ,j%rG7?XŸs mG7j%eD!v"8I(Uv{]_^->wyv]8Sk3 eDACuC.mʋU4 TɣB/ 24nF_%_LCSj+cY5 bkVkdž@<5^o(bu5j,q[ft ቕogs!3`Iԑx9v"Ϊ"beҪ*(-QԆBA#zw7ygzތ ,:W-b,zVY},5oAjVH!hDMRuB[uLj!]vvi  i@gV3zvhm x/ ǯޚнE;Eiov$l& ٶjŐk9Nmvٗzk@כ7Sn3U;, 3J77;cP3=+[{ ׸ M.DΪ4v5WZsL5N&)k5vPieƝ=4\|Uˌ4g<  ?AmAVsMLΤ5 Rw>ڡӆU((BZuh!dZ 5ŜA+Bp#ph.6_5fs˥[etiLXTdǢd֢dy影!fKZ,vRk5c YZ*y׹]Ov"BQ>@J+e2P1~/|i)n}VHy5 NT{0rꔝC;VqA~ALi6MrgR/HhNSټZ}ڮQ/m|(}> '?(XRŴlg|mq> ~$> H|@$> H|@$> H|@$> H|@$> H|@$> H|@ګ6|@npڗ6Y)Q|@{A$> H|@$> H|@$> H|@$> H|@$> H|@$> h> |@ hprRڥA|@$> H|@$> H|@$> H|@$> H|@$> H|@$>42=H{9q01y@L +>8O>(ug8)߇o-g~ iB33.P;{ٻۻoEӐϸIg♊\hwnC@?d<.?IOb؟$'?IOb؟$'?IOb؟$'?IOb؟$'?IOb؟$'?IOb؟b|a=F7Wx)_aq}~9_uݪzT9ֲj5[ FֲFn-P[kna bp*ZrŸas)Fں\1eC~r|\Yo髫Ed͉j.ijS4rҸmɕ[ WN꩏^[^K2\1p&?\iKwϗeڡ\֎T]1po@(rŴfru"W;+K]Hl8rŸdF+}\|%i/rC" i hq)"WL7?wŔAjrjf}F7 2KoB|QFjrL>nU+׿bk^w@9nZY]_!Pzi?~X߾{So!]W?gWş}kǭco7}KC< ˟^›2u\D)bfΡ5G:>]OO{=Nd\FɟLs)ϟLp[$W !YM2QN~JGV QxVzmS>\_LK/W˳B6 1k)lȪ6h1z3ǟ=!08?>e7wji4W:YOQ!)7-7Z zen󯷘R^o|H0 y\+՛+i"WE'Iq:2F7Oz\1K"W;hmܤ͙1*^{xwt8j[p[miff0**AI[ۆIdt:8\8)?\1ͯdJ&ǑGU* ^ͩhYkO\]-=f֚@SN|&@r`c0rŸF+u~rŔ>\PLrd@r^#W4\1m4[+"WEl Q䊁ib\gF+qrŔ!\P(%@r`0rŸ&"WLKIuGb\R;%S%rCC8\1F6ՁR^UR&6g/1L G),bםViL"W+=L3;/ZdGϕΜ~Ker ן^6^rk+"WO}Hi뵗'upɍ"WL)ڡ\ʞ+[톑+5TWLK+|"W+kln bd+)5\1|3ȔF=oHlF(rŴm]Aڏ\9#4wqfq0sWMl]ʧ##M'ԃ)b:<aNcZ䠞!Yd}ߢd%dG9arjC < 0Lh Sz+;`#80hbZ.WL*7>ŝiAyH:BzMV0\X0\14hO'2JgEv(W{=WF7(rŴ)l]@Uڡ\YJv di(rŴm]rk)\}"z+jbܤF+:+4Ajr`+듟' w+%ں\1 \})谽#&+^dI=> Fʟ>BefriiLevb3xH8Qi_tSzyUG>HKCVL m.9wJi, .`@*! Ҍ(* ڨ6)ߣJ'ɥ Naq0E%LI2=zRNn[ԉ?.է}嵐DrrcrEru"WOy|;UԻ۷WFG5~{}{?q=n=!XS{ w7{uowmY;z? fw,1̗C=-)R&)A|)m63vwu{O[`GWp{55vπJ$o ٔ#wW`5 m?=tur@0O{t~ @dWg ޳|7sgПUE&/$9<ϋK̎JkThoe?Q 4OJG^*k^QVր`/mz7JtBkS'(xlEwT<4OV퉳ksW{}pth~{M}~Z$kW5Dmkg(֘B5tp%iMlhk:]!J.;:CZ&u-ѭ+KU[ rtBwtut%Mtj ]!\Ӛ` JNWu=HWR[h )ƒѕ>jrQr ]eFm9/*/۞ʼ6xHcвE &A%A[Ru0(ì-+e8!5tpl ]!ZNWRvHWHm t(+Gttu>te`O߸Q}E0BL·h%45oM#\ٚ њ!J:>C(ZDWؐLIGCs<f!|;tEXA79 0c; .?q|ڗ^CtE;:Sh]`JCWѶ=Ոa( Yk o]`ik B49ҕ L"Bbm+D]!]I"-+m ]\CZwh;$wtut,qSm+OJPʹ[tө0HEmִDrQ ҵg~jb0 Zwt"ZTDi::G2R[DWXg U#Z @Ɉu% 0q%Uqy@ҼM[iZC3Uhm'@J.EM=?= TYv>CSʃSOuZ{EM+ձ2f m]Q'+_u>W643+EtjB5th9M+DDGWgHWd 5pSrZ`QvYҕ6+d[ tBs+ɹUEt% L.oB5eΓplyRlϕ&lvIҤ5d4DLVVLE.ƲZ堻fg9OjU 呢fz:h Oiwgq.rFupx7z=H~U^Oa?YΏ9cK;K2 ߍbi~E;aaړ[?vARܔp7lM~ʎw{Qxme6\ןVa4w3Kݰd9\>4p:3XSc Xi}[-(b,mcv2T5MDc$1_mq/.H_/1qc( Dh,PG3I  ڨ<QJOuLy)q gyO. M&g(峹?OakZ^ koשҀ 4Nr4M!6pD [էKggoÏð `cpCW'vV}vv~m2Th[''ucfe]T"hpw ^xL.$h'!%x^Dn\LX|bE~Ɵl;)V 4|ofJ^M)d{;LFSn_F{%ET6ruy?ј"; ]OKՂ_BA7 f4Lݧq?.7}^U/ʥ$l.iZV(aYWuJP?%L Jh9xIlQ0L2 rThG} r*Ȝq QQR͢^lE'ȩwOKɰ%X1oF #\uTXf{Ib2:(#Aj& iE\SUv^V5W @& 295>&yԖ 2eV1'Xr'j"Qeb7 y";ؠ4ޅvU>u w:ie )Rksʉf!rh+!qi|,i9Zj 1 2480RP@(3%S)R e' f' n'^c1Wu߀3wT4eǕD. 
g7@Ђ6bjם̃<]Pe.(S._5KҧrENE<$x.o՘ ][dTmZ킀h'Nahi㧏#J-; NJvu^(h|BZ u [rd"SAdqjza$ul_3lI_j:/ܙDh#3DS.d!^*8 Xe8FO gw>QIXam$T ˠ╻曙mZgAL!8>BڝKFnl`DЮ/*iJ6&wm4NHmZZ!:AGgiFtṬP%M _-Ymn^ay{&`b~Sͫh;w"JhMRƅkXcؓ>u8S3(ы824 8Cu<ؾqvlČcI(]2Z wQz%)o=ܵʶq-8{ӣq?eaww-j^^5}z͒ l GqyJh8H1RJׄK#5VwV% Q!`epfrrTkdP܁S@\\Vg}}h7?kMl07e%"EF^ ga؟w.L65QIZQ<8DNȌ.<À-aR Z¤PB;ĤQ'e`'m}P) HRA/Fe,uïOV9Sr))!WRrЂbNeduYA KӸcm[7*v2K W%Vm9rat܊c= >|btwi9WFxf9#' Ma1mYY++Q>4’!NB\#!> Hϩ7"uVfTFR)cb`:fD,NQQ/pQ73g73֧Uj,X.45sEp6CeafٝVx{/05Y_ Fd36!d6k .mjfǼJ AOY2;![pgH\頄W"jf@ ̄MªR&A68ۏb6kcYڶcNn)ʨ .QYp57Ļ3!84'182HҵaJ"dPT@x51b@8cvu g7Ab $غF?R##>1bLj,^CʍLC`R G&if6qAL.'zJb$6+MyԂe}0":yGH ͝"ҩ68:b\MJE^3/Ҏ;^fQ擌ƺ YIQdq)+ 1{ J\Nj/MڬX>58_–ke2M7e7mYNz= xُYV8\xw4J*01TBP/ |(֦ukv.(pR)p42c'5MĬd"{b ^6 zT\J8.pR$YJ,"+!d`$PO1&d潆?L+Mx{ C#P*n\kR x7:g;:<%6!YnI0/hs3n1΅@ 8dL1RI`R IHТGfwC"^hh8M:gw"KC("Mׇ ]WƽLCͅ󤥕Ґ*}@m*\K mI_ә`u|Ν \ug<̾~$ЩǗTᇒ&UI_g)-T45'6#.0`\͛1%oWiI$ Gc﫼J(-x<9Ϗgyx7˨\B!|Hmع' mrq<g fcD R2O\f- sg(g]NS.Wawyqx X2.? 7LZ-ؤu`^H9 TFwz٣w\qU߸myݝaRkTR3;F^u#vۯ^p|ۗ .6-_yD!xmK{[ELfǫghj~, =2 ˹{Giy7]3gf]*3A;Pu1ݓv-tMť>;7 ^r CD4;n֖3piN>̘r!ǚ IDTG9i 2{̙Bm (q˜i 9KpGp*sX!H2,Ag¤Qzc~W/quv:滃ƾąUcD_qJmxT!DRvg FٓBw\-^}E*&k, t$l*~h3h]qQ336f&2#8&K PbAz|VNh >rȖr6Ĥذ'Ld/\⶜UmC*-:yR%B*ȬSa2}P쒈 vt նrc>}*YMOm! ,?Ց ZvzlzĀW Z!7}ַe2߶V#@&Mvś._x>$>+iAWg/Fz|t$@-aii~_èq]7Yy []pErP$ʚT@dYc/sAfZ|MX?#)Qt& n m=t_I&tUdi{T][&0VᚓsX/jCY-fVR;kg _ި*+JV?VčlqAb?=clpz rϺ7 S("([GPBL z:DMy#{<OKK8d5TOe/8D2t^a"tpv y| $XI_,)@Qm;bC)' % &d.BS)q 99>vq0rvsMGe %DZ&Ҝ r}]kחw{#n[.vaYK#32:C`9jW.r2%U RD'${/ cy+MLǬ D)o:}PǓ%!+VlXl/6q SF0FXa[usCTE w^GA}a [7N O-;ubҒKfd\dʛuZbbұ7I<'2@8cLyb'a,Ns] c`Ekl\p4Ax)j-ǓX'AVɭ!9=CȨ SJƄҒ2eRIq< =q+hA <,skRgװR#jkW4 Loוm|SjBѓw,QGmMamcSppVzŚnʟJx:6f`;#rDڦ(H#jW(.(nC3Wޖ!Z.kպ3 Ԩ̃n[Q-4c ֚7Ǹ]]7Ch9y_ĐUu8۷(9ߤyOHJp~̣?/]^wOynjy8|}7ryë&8KQNzy?s Yehj [$|ǪDuwkck Cba|֖T.WUn/,CvŸvJRu2,Fni*?ʬwP-QAx,>|mFޣzW ?;YUk.Pno``Ջfy/YnM֏@/bEtG$ڰ^lTkyW(D[r߂K@y{' 8_OfZdV_B7MG|^qL `xpNyG6"*0:J{܁YkPFz>PDrh"2 J;B B&kZG-aȐ?lڟdjTHĞH{C#|.%^y,y2&&cRs6H>i6|>PRR̢V͙̈́J'JJ'%ѕŰŰ))GRq | b'tCNCz}%7*"3҈I{qYN2#Mg`4'{Ք4NsKx j'G${\A%9rnчIF$c MWJm]DoM' |Ӛ FD5HN`L<$s$ @g8"₀yL,+L6e:C* y"CdBЇ FHS jJxw"=YŸ@7 <qڱ!yvBׇ~ք^qY'^{B9"M|m$/uN `#춖^u?wOz;O M:5ָ͢x>֧7zus=V~T A&M`FT̒ېg tiAMY|:FfE4V뀒y'QQ?-Jǒ]zxup9 Z(|g$(5A9da!EAO?11+digL2KVg&%6 &sKZqu2&Wmu E2u%  PEel- S2Y cT-㉯%q?FUƄ 9¢3HLh4$mEDgjZXo0]a-0ysuJk]koǒ+?v? & .6v~\t˒.%9{(H48 !tuթS==SJ%z{MktK&]₷/<6blONܕC4rtk\85ǣw!!!cBo@ܐlRMfk8V|nv0Vk5eK񟡙^yn89_Zϵk51:kʮa[+|wHuXuB輇( Gۆ4<:!V!;rz3#mtF>޸ӽxVD -PT=)[2&Nڗj;D MnL}>[m,WGeRu͂~q\#og߆˺WZfhWs'͎_wl&mmGGqI_)ԍ]ӣyNk~/^`$Ł7Jk-u]nyC[Uɢ:5,=߿Ym`|gx -n~s%MǸ՞=FOBhCΫtuyǯ};{;rO E<]ZZcuX_xe,Qfmx}b[]Ⱦh6]?kw{2ZOźboܢo1/H`&,.i?e1eof9>p[Ev0]mtZ/CWۡ$-[Еc^j_]u^O:\ S+ Z:]ut PPN:p{,p=O:ZNWe{zteY `'CW6S+FΫݞ^!]Ӵ Ui*tu(^]Fbv! 
W'CW.MhΫҫ=]Fr^[j8?;%ٰq]⸼V98fGz~btyq_Yʏ˭1g_8هS"~i&+|l?~W{<94k?0۩ώ6]ϟ@j/_cN+ﴢ,v<5yJuOWĠ9%tG4i:8'DW0U&Sw:]uoSOolko7.Ck^hv(^B[S]Cdua*t :]uҕ soz:ܗ"ڠwR++0!`Rv2t3h:]ut 銼WA&DW-M:ܗ2箴NWW+vAY<̺P+ӣq]۴ゃ1.ߚ{.Z™/- ^{ퟲ~fң 3땺vy?2CLP<ҼjdU4"&r "?iO(2JQϫ6b0C:ߎЭv{2;: [j<::AB ֮O/6_MzƶGV1q9|I^u4ަ֓NVDVzy[ow+ }F7m9|xqRk̹$coYy*qdͽͽpjUbe}c34qv)JZ̔UrQ9_==;nmN-CámHF)mp-)ʒ&SDSj*0B?d@N$劉FT5-#y1; oCDwB "OpUڃ HQ搕xY':A!X2NR]׋x\x8oB=FΔcmYTE*Q̶j6j΁Z)x~gɇ9hnn ;Qj)J D8:Aҏ1|1!-86d VjH) AFK!!S`Iru^(AxNQb9[!U|aC2)WXqf} Y+Dn|^\,`K߳?jWjA"vTFE|)ϐVMmc%[bn@E E+{a][ :8{0GuH}3M Hc0y:7p,jXR]qhils]Ѩ(AJGl4xTSRlk;j" XmR츊P e؁pIBnTzC%52P(SP| Dak!$ ( <*F5jQY'yLBݚ uLA#!'aQ|Ѩ`QB's!H)DN+k2 僷$Pp0XqyT/G%!YEGknx$ ې x.JrB#UQg15Ww*VT%gB]V3I|Ӈa|?狓#ޕE d*{%}}6B@FA|u}LǠAJ./4LEpi6nk ) ` 2;=ѣf N9Qv9*յc[  RH2g_XM9"0f,mpZ["F̓7`:%@'(Yn0SΨ$1S"iU_(M.ɀ!rP+ bAơ""Ǧ;02 IP>&~H5blYNP,eẐ@jYvMGHhP*֍QڀJj*x/T޶AF-w+$aQ6h` p[٬XݳSZ60Xy=xyޝt>;?hL{vxr\7zey\, B=L ݸiF<܆K`-෣ʉf15W5fӯF ڴQGբFKDm1y(G? o=-P5nJȀ5aĐ 9 ]QnDh-.?gގr9VFیLv0X' $CBfh1,7b;`x߀y 8MBiP'W W`!GoaQygxy*aB1+(i)UbQ w zށ U q+mHO))C1k{<;i톑mԐڽ *X$%Zd+ܰfeeE^/#"E~a<Նxq$k\+\ODVN %XzDI4`(K$ie҆'T + {iw*NaerXV\ R@hSz`iBw K@ж#Q̀9 ,xBZg";0++B>?SpvhV2LK{=`zr8(% ]a$e7a>@p [X-9V,jwEDX`0MvGO4,mP,]7tbpZ@$إkts^b;J3;Ҁ*[j0݆z0y ɼ c(-/XVx%BP9)A*W!g8駵{a}q^m%@ݩL&(# [`ǯol%2|?nT&n0 z :٠ n4*L> &,MTØr `@J0R͢l5]WS ? 姥οkJfh^MuwIOuf mmbOi>q!nx>c7@w\jATLHgNd㦇y@Yzˎ"O[)EC[{t)-aW#"9kͱH?t*RZ WHbrG2쬤p<]8rYMIKq7=O&qa|/K`hҶb+쌌0 m߾:&Fn >YUP^)ѮЌl9$˱TX2 Q%4.Tx\2MEV.߇oOsϲWȲ}&e'4S^7wt}X8o$?o=zous??Ԛq(5>_NnR꾇R3b蚲YomH^z9/Zx "4|^s>VG>ø<2\YgL%OUy),aPVUzȄvkqrtg]lCn4w'p@wPz~2{%0e4}ܕ TO_f]-୤k{c:&gJTk5Ծ Y̢A.;[}~z<gCwN?o&)vY~x Բ;jA}1+(XGaj?ƣ{נݤ?n u O@~EUΑ7Kzլfz2BXO-HƏy/ս1B[{ LD,|`,#gӎ`MFikjH :yZIp@DKoJܐ`^'ZS$cKfz\o^t89weZ}^R-GҤqz`*KJhiM \0:]sAYQk:g*Ea@Zy*ZT=M/cn^& GόGEZs<54z}Hʕ=# R< n \cϕzu蛱ia<8^5]GxmPkfW(p~Kij7`QbѡU=;ϣ9Mv I{vYW?UwSi6Uvxmkx{꽐'ꈎ޸GsVeGsVЏފ^ћ\-K.+ߣWLRwtHGG`h^M7]7vbCO!B)yA-MnZWOu(l ,V Y+YES\^Ձ&q!:駫ɰ J n,wɯܭ} S{seC5$(e`,(̓$dd#gr7"uWd㲘7qv#cJoXx,P!>+.%*m_tHڝVt{OОl@U\86`Ħ6Lt}]`LKqc^x%08 ‰rOY2{ɡZX{.2½gJrw٨3163a*A7qv#㚉y(x,j۞Q"jUOaVFlp ͂;9}C 8L0I#LEY<[IW. 6;g<g%hs]ͥ&xYQ4Q0uIяO s'e"9"o0|BڗHMLq99|X֚g4 zmѽ$7;)/~^kM7"ஆ:7lz:Ů_tE٢Epz}u>u1s&ISLagT6(]E$c!ʁZ2$)6K'~vK 6@CEL jh/֣߃yJf|Q.ܼISv 2ep$ 7,zr 1 )j!bKj*+%f\V%TEP//Se;ora5Mخ-Rq5>=y`/`Ϫ*l+a^\1W]j``oŪ7 +(]۫vT,ݭ1YW%+(8Qؿm{UݕiyPȶ-n'ѯڼϜ+l+Qq]9Z+c\4Qtw^miWe>#j:Gg/ ӻ7 x W\=FDo<̾Z@++e2G\@3!z{l~J)Riz|.cgrC);.K±X$:IXL%Tܕ:Ƞhqa!VoJ\cÛ|/mگ> dkKi7="):|Cb("(Wԧ)'˸ >%6!Y˄`"³ B4>$` @p*ɘ!_3UӐ `~#>y!a"(t$Ȉ-j6j͙əT{*Eu j>CBs@vʐ@{x'祺/ v@nFo:3<URB(lQ ^UI82%e,Ro#O?3/"BO' |iȣ ]*(00bThyuVE\)}v^PPaQˌ u݅QwC=8}a8UE1 V{oqY0e!d=` +2&;Vʱ~H^Ȣmբc8ehPR~I_n ݯqg_8; e63Gs PRp>lkw1Jd0pLDzoȈtκOvpqD0bJ$to7$^iDYhQR8My|R)}?x7\]@壺4r! Df_ tr9(0aT['qL/tv]yS*!Ⴜk{b47=>?݊`| 0|Ok] 6% Jw]ޞ'marQ7z7>Pzo|WgOO}>>4},v?dI6JkwUWߪy"im[Qk~AMmHӣ^zeWUo4_m?Rӭ*U{}w7‡jB+(mbOuߺ!OWMS7a2/9Kdx%/a1ϐvK@xfZs=4wG=sZ j"P[EjY7 HGك뵬 >m4&-#iOϢϝ~+k~zli{nsaWzۅlEO >hfE"-Y`s]{o9*BMO( w@`{ \.Xd1#y$9gzXd;r;$m~zX+0:xvb@B(:Hzϰ{rQIN- %%; +: G׋ЫfAˋb6b=w1 /oΉf(IcdH Vس@}X/kb‡!9Za2B76(4;h-#(9 <9ر,pJRve: lBgd)cI Su߂% GI4)$)"Qic L$m0JP]p%_A]y]'7F5ͻ:A7?7oXȧ 7]ӂ3G«J -{ǫkFLɌQǐJ`| @â[2[;lXmTN$aNQ{ZB;E_kQ(]*(h$!X{8P4xs_" +0zEb+㜌DR)588Cֆl&ecYr6 (6t@`S㝏WLHNS*g~1Ce ׇVPs#n(^YݭQL&:,;wt6ˎPҖd id0$*bcƢ7W$}JṰb~Z/tv`qMWuV検%%Aͷdӓ$EfTp`o>[HuDJixlfSieퟭaHDMjB9DfcfT@ !E HBiURMwa$*:H(\HC/59Rv@Ei7##HlV/O&y95ފƨɚ'D{ UoV+c]-gd9-L?r7jj藓\ݐ"['jW}%xr҅Ѷ''aɫWvMV͞ +ojqD;5nB*󈭆͈(·UWu-]ry9/xT.}tx^Ka{Tcy:MZՙ^ݨ''[FYwNgJͻݒ_QPG/z1AQUI-ꇟ?直OXȜ=vW~R<[9wL^\}q6bsbqAgׯn44moE8}͂? 
q@5l;1ߞWNP ̕0=!͋Ͷז+WW|6iEvt3oJ7%yr6$OY-{^{)y~,n*:We 7dc',O;IZ~h-Zt5Igt bۭ|}4dYOW/^crq֬ח;o.y~Zgirze׍(pݻإc:%).=&_i *)Gut|.<bʪPމFJuU?;Ly@ޣ~t؊oEtU?P.os;灱bNpd~^znv'{a7nLAR^Tm 0##1[(Bު"kėeG{s;NNBiZ}%ZZ8g̨tT`mhx @a8l6qAFG_k#XG}v}oi#vkX5 =N'3>d)!)$0|I8bBHQ2,O' e7]{ކs;[Wm񵁆<|&GP#hJ KpFZ dD evBK>\F!)" kAPrq@F`$ZiW:C$&w2[C]oPA]U˖m4ISI땫I <#WޕKPc1hsG4L kx*PcT`c2(yOܴ8Q3$# j-1xN=ox;Fw#kx wbw?=tDM,E5@>6z8:^17M~8GpJHkb4$2IdIl(0H.gIS Y!N\i:Z[hF+XlI`$ CH9j>LXB_ z/Sp1ڲ"Y)dC a-"e&}>f_, SdtM Qc$/AIfu䁜 -#KH-sDa2ddj,,1'&ҘL ntM#r*UљкM eŎW36lJ`o28Aڂ7gFc9~[fzU'UgWSr䯬mw*W]z'I TԴܵ:{^y{Y^̧N&kk/wռD {{nQ=~8d||R_/e|>Z`/kjᢉ-M͛>B@ Q#F5eX{.ɝcjo= W9ˈ\nx]BC*Am|_|kj5/f=O쭘AklJpRsi;Ey:je/YS%HiH4OcMx@^^P^DdI 8]˦)CA':U0 9K%m4ň{A, =$x=nXx+jUȫW7秉Y-ٴy.XRIē7 06^jV0 7n#X\f{Dy9?\tw7iWp2yZoSs>}|yxoz}PZy9(Z>r` PcXtvAE`UEk> p"jpv@m}"EV3K< hi?|\n՗\4yi{s.~5(]HBAZKPLbtpXXd3}K/NAo.flArM1a"o°"UR 1 X7#4kk?_,fL%;>381 s c*䏤,Bj,#@hkT"/)kgJapXr.Y0l` ZOZpc"(AҡhWz7KpnUU)AwԌ{du|1mOUOߨ*O+z0=flˌp!KVPچV jbr,*L6J2q8n 5B\uLht.DחM^$Jh$KBgmˤ'B[;@Hj mb/d[(.r+&Uy&t2|!SLvIn _{]Znm>u:{?=uC~xa NB }]fݺR⊢ XَH두2_]{m {[T hۍ7@Q7Uljk:Q0y[_D0+V1f#,-S)2~ jҌGOJФme xdt&X('JSߵz>4J>#}Mw1Buw]-nn"pT'܉v D` & ddeu]io#Ir+|nX``vx<)Mj}ޑŢHKIRVdUˈxV+8-ZJ]!x(i1{{ ;lڴmO% ^=/^x[#7\)`G$w8"oᡙMn@p*wI,8 Q5"kI*u)"HM״ƴ=yG>R2<#f,fl5'lRR*dW/*?,fs;(`M囏7ï DIhdnI (ibAq<ߋw<~ ,\< JFYmK1Vk \h\Զɋ$N=_jZ3[IwPD8s>d([ɸ"es]i.$1R N:vM<8JJV']S*1XG&d\Ⱦ8 _n˕vݩfdXxGq:\m.0.65F aA^D YDxb'iy`NE]Wubba2S:/%(at\`EnfJ$?ȝ "tJT@A!T nV|h6'>L盗߈'Γ&4WP"KQjC)JtPPiZ`oHN;kd4x+c,:FxI.Rg:EKbLx-Ut'bl'W럌dz[d=fOLkY)9m4vu\67S} '"#4h:d(n Q-&LܙyBgI14HoM.*㼌%!yM;;\Pb٦`*4R$P#e E2*Q }Eۗ[V-&ΖZ>g-ϡcba@i G,J($)惱R &EyY*DFtT$1Z-hWy1H}0'A/]VJ-0.'mUqI`ӓ=y0(˃wBk4|k[qQ >x}Qq6wY{v\PR)'Ӆjx*[PglyԸs 2jbƅ.p: LZUjBHΑ\K% )O1@ۧ411PTΛh+.OiǾxpO@]اǚ~MFn69AeEpHՏ@ +^FӋ WfJP 7@hwRcԳsɞT@}0rjQQ #1sa"P'VPq!(\Xύ$!\ncP IQ97SRJwN-&fЯ\v4ך6=_J7Ozb,ݔgzmY Aib%8sf剜:Iq:VpMOM^NlB7:Q\ x`` \^?}'糤xz}7tfxte~z$6` ԩ}Z+/{[g?c!&bݱCC]gnuI& 1RB[8ry(K3Jto;CΛӑe? ]O{[v}0;SHzĩ{*&RȵMh,]sksX~<6>刭6^M7 zwڭxeW. Bv}4̓wԟ[rz}ȽR[|rV' g7VJϹ2&ؤ/.Vj]E5/mL`VmGm05vL"ԨbXg>Ȼ9B߬I"LK6FxD Z_nʻϻSݹEƵa7 l'3(g4\.R*b0Y.#X< DE/ 1ELVm{tJ;{_KY?uݸ)H)u>H$%u1Y)8em?jNx4Kb(h&x]yY;5>%`P 0/`e!rƸDqGj<2)n4Apxu^Z*Wx @\+5x]5t&݉R-,YwʦWV,DF웱bL},/f#`\ P>XaWւbI$ \HFGKf^$Ч'T)>\)ThӦTM0ndt)`rp"?{Ǒ܏G-@$|q9~R).o zf_>!|]uUuu3-9' A  ۻnM6-B-3v|&Z>}&C~ONH4r[?!]=v|\uZ8-|P3Yq`r@tʆt8tp4tpPkӉ!)^EBsTLx' χNJ<ևXb-(O&A z_q|ot[YDе7uEeƣy[16j>^u/X )E߹^a'?0jvI[rѷՈIvz3F<]QXZ7zA!HOgH TH䤅_R7m@eY3ٝK펙O6x_oBX:8D=kS (hɢyRd,&`kF1>He2TB(Pǁxg:R%PhH Yljkp̗d*Pk_Ar*ZRz@wӰ)kvv˥ʒ] r# 0a4+ JLIe@As/seD,gE`:dYG-5Ypu7# UlXMKHId`0P[EPЌH&BR`@~v@<]FN Ǟ[ebҒKfQ10L:8-SdCI+u 1&<}Ij778}+j2;OV"Z~J|5"WqBGW8V?~zɖo.\nj|ޅ/i6Z~n~2=e7.M?v]Jױ.4fDt&*^ՊF uO߱_uQq\ܠnzg4cI&k%E۵-]>1}8g Un$&r&rA[Mŷ`vύ8A{lK|eN7&-;iAR.D:嫏͘fsז4$YtWbɿ\L]HZDo߯KwPB|TY6]kNv._QPor7[rv}޿o6M6uπj_i:DA>$}׬asix֪B!#>`H-pdNnI&ce{zW* A:U{GR}Q [W}]EϊeUѥ=kC9䐨0*Hy9礀Ay֒Kt~Gn}R)zluϧxSԻPL'>l/H=WZھ~rZeԤL€ʐ䲌%@-LFFu.3ܨ^u#u_l+dM0ν|sYz\Z}r_4?ouk7qD%& Zƌ HgAYs7,218ېǙq zyqWF3QfB4O( s$F] 3h殣|Xldz5)5Pȍu˥}6J ˦J|T51*<@ -Fޖw p8΁T:,rE|3zysC-EqK|BZ rJra* %<]${7^k ⮥qޤo+A@vzs^j Hö^F=IkH+4 `p*^cW5fM]䷴TdFkgNg&dd%MJGMJ4N$x']34 &r8X4WKng,zEU+(*G\ogDc}ʕmCD3GMzE>Hk6影Z,ǝ1JJIbPWb&_LjM;~DȱU \R5&bD ] \}+B:]Btute`C\aJ|Egֲ7~ΦI/015Vr*7䶹5Ro^̿*.*GL Rp!Pշo7J#TH74]sM-e&>EFRU * \}Vc骠D;CWfˡ78c\g` =n'drmtev+3վC/@ ]\)d_誠]sw J#:AL!I*Q$ӌUA+Y骠= 0R0 ] ZNWR骤#*{]\/tUUA}1 ѕF%𙣸F} }HVBd؟4WI'\ެ}Y? Z]_? 
J~nuսUP*>x'N({DW EWVuANJcȚӤ++Θwx  >z V%czۍB)=i'.pMo ѝ,(S 4s[}2q| Grvdr7HvCx@Wv}KAzZ*p}n7v Jc:AOzDWXI*p &骠b+) @NU1 ԪtUPvw5ՋOtEQUx_誠] ] ]i.0)a a},KNjm" RT ۟b; xŃ$xG 6Ƣ!X4-vC(9AX@ `L 0)ҕ(ה.׃1# V2PCwh-{DYh4]Pj= 4ZOF06?!n Zy" ZCW+f Rz\{䠩j~⻡[t;tsU芣d?tU**hu%g0 }Ү00*p_ $%*h:]fЮN$լGtEAo彡Vv ʮhEJ1~ֳtEf5Jw J+:Ehbw:S ݦBX`{셭CެW1՗Tah]/0-"y_誠ߪ.(tuteOT[RG*p7ԂUAٵtD] ]In v⁨H3a8b+g\?{Ƒe 1oYd@>,&d6 Hd{{o7_|%v[.Vu[^r|-UVѴY4•QV4}D7CӒ\qmi_c{^:;S"tu"J.'X*9]/ Bu03tp ]!Z%NWҨ ]ImxW2vkRa wZb;bc^}fb=K VDwƣA3 0{4eYzu<eCt ]\{᜼'et(yOWo4GWKOD+Y Qntvpʷe/h 9SMn_çl!iZѪG! JG!|;4Mzk_9g7] ^!AK;UvOCk.4+ 'ܮ N:DWXw%]+@+_8+DhOWoJ4+eW ъ֫+D)mOWo8J֗w"\nBWRNCXOWo$ѕ$++:j=]!J-zzt,uI.g-VQmNryA* 'ֺ3'µ+' < [?5%tiu•+thj;]!J/E2Mv巗j2TNW>Mgt+@)&p}F2#j㵏oJi! t.+4 hn (%5=M34͎zvEQtu`}csOk/V˜Cy"JEWb=]Iy2% ]\mLW2Ml PrK{zt8u0#3tp ]!ZINWR-炐.)'պ+thm;]JJUOWo7; QWV^]!J;o wi kt(Y$])kRLv䮓av%WC'D||εk߹I' EM1'uwrϐ:d+sFKvPJ޹Y\q-. KptvOFNwnӿ߿bo~S~s??C%XL^&X8ƙ)~V5oj?-hϠ+׺*,x}ß4_?cyu?q_))+bgx?^C Bl*xcc.'ϯ)| |Oi_.c\Zu粮mok?m]StXB0 ˙q.r^I"r*tLG< Hi!Hh2]U*KDM$*\&Olp._ 'n.;ܴd`G8h_*m?p]O9/a yPlk|Uwbêr_]sRe~x_E"$qP 4P\?>O:1棇$Ľo'2*DGAD:H1RJm)#/FGpTfoIbo@wV;Gвg9-[eK'AXZ]߸ ϧWGL/T0z#{HuHce,_bиqrr^4PPOD%Q0kHŅ%p"Ef$t(ĉlCTKs4P<ĤQ'` ΤOR֑Jp# m\R=|:%b})E/(1 -(DYHVgs.V΂ӫ=S PhӍ鞬MזhgTf=;ZV)0"ZyHdh֥ ISNImS# ުwIo19hƀAbB18j cx4+hfpqk@]ns wY{=;IXc֬_ښniْ^zAk}I{8&+.Umkj|eڻ+q50^20sAXT2I }$g 9FB4ʌc`؇2&fNPO &cMUƥ.C3ffJc\ؘe˅a.T=(awC.iEW;<˷&~Upx4_|Mmh#6l J3c^y%p8 OY25C0:{.2½g!mr^:c36 rHmffl?--ݘuڶaֶ=k vNaVFlp ͂;9}C 8L0I#}c4ϖ)FnTo$!2 KF||2NuҮw9̇S?}H(&$ئF? #ngr9Ӑ,*)5 .rM?sFˀ,#ZC QZof)Z<F$@G5O0fBsˣaFl aF8Lxq^,W:sy7̋ŞY]owdqRؿ+C2niF~Gh[45{*y>z)V?'=(GTק j`fKkkZHL[>mw K I=!R\YwOAv%e.O[&Ч};)s6\/u݅uE=O9 .[ed:tKn-wOM{6~ՠ>cT"yyC$́I%;S{'Ay)ȼ^̈| ʃOkԚ:;c$r ;\&VS!j-L&"|W鍡j>yps: !Jy)\: gt":@.0cs?X4Xq.J&I++#1F牌"*K(V)eC*'tܲF|Ӄk<&J LjI{ЁYq p. iYEq7}~ M7w=E?}3[}3{ GR:`DX0E`B8 <+HO!UE^-VQ0Jp',c^Gr8(gX>KK 7DJ@Kd)=F,"+!d`$PO0` ͬ%M9dPhyFm6&|wxӞ9>FI>q2.;=1ya@8v0*x%e$&>"6 /. 
,KK o-$FF1lIhѩ<xIoً@4sDcZZn#r#)Z$lJ7r*9o-Q:<♳&>օLCTI7= K)h`y5o- и# 2a*Kua<r^FgYXMPe>ŬJ89=˵t}Ϙ3Nd ȤA":%/9'> :Mq$lj>1Q>(C˃wPu])>PzH_G0t*?:Q PU{JI2[P |?,f%aRlC G{{P_}p5N~֩O[3'~3\ U@ %2uE1L R4/~AQk(4a@eHYrzp,q\8-Q$,bfn`?`G-~71dv2z Mcwݨ!bg!Yx(;`֌܇$ ܧZyp*J."~36$$&FxPxe1)3)E1@TLRRdXeyw@t5ъA7I&ey⸋HLQcSύ R Rda舷١z~LHw]yeI?ɇw;xeҏEw)*wc?V]x\I:*Ha$H@4i-Ji6Y|)ܻ8)aby.NWLK/o:y3E!-˔OS8_cJ^/J եzIVgvJ NKxqp2ϋ凴\d,߼,rznNFއ$Q Gjڊӓp91JwEqLj4gu($ wd 23-5'@r6?)A^U:8Koxc mIg@\8z]5hywN5Cqß4eH?S4H-xkeO dul*_?$G LV@$W5PGW6%G?Qryr4.9ڌEQBS /9@yaVNh J!鯜-m ؠe.rkUh=OG &3BG@Pf5jKQ<0]k8l;16/mմlwsNM|p7h0 sU+odmZMa#8T^#ڔ‹斣%,/z}i*JͿv yAqL"ˏ?Z OKmWdqT5 ArQS$P4ORz" o^6w-b/AHkIh{6p쿷qi&jZMѾW놮r{ڨGcl ~Ý^̻t ]՗;;_^9YXby n{+TqcS#lHzyҫ {pCNj8TirB!W=T"MyY{ҲL,>Ұk&~M.P%/%kwVEA$ IrScG76֦##@ml~PіRX;jKݝj,ֈxAՐ0uyƕ 9*x)/ c[!{Q7x>ŪmwMA~ cF'a9@IusC!icP0OԠqvJxnaݩ\2$Kh"РXe1KW0mMkkER!c̸`- &[%Zǚ;q JT uli6diXA'6a*X_JRfQF% [A J g![#='TsG?|FLmؤ7m HɪEa459H:90~NWvZ6{xL]ZyGڦ(nFU*/bxj<{|mY Rofxݥ'mmYңǼMˈ}d>6]/iNi~9\3@<<\UB>`)u3_z.wfFA [ Qg叟?O>9=|օٯS4-~X,~қjuG׿]<֭V?r YeYkY[gO_mnw joOG7h֎&kۍ#]n.\yL47SgYɉ|Ȣųv&تgx wʶL溴loc9ȼɓo4i9Nź(v˧|CzEgkӐȿ/1|qZuЀ뛭vrwqi^fŗݢ\=6?~&{܈5cv\]Cr7 [XoYUWy}~MO@яnvgZ~{;S&Q oْo^U{U?P b=恰y7N;xbm+Vc4k<Pl %Žs Y("ۜU}ՙ;9nA[W^۝A;܁c?\U7b^WC|9$,汲̋*J˙n?O,y{Cλ |K{VO'fzop4MW7~H=U }^+4WG2j fHYrc,!sZ̅ӄEB++KŨ~X[ }K.twA[^ vԢ[{>z/A#79abZHUsJdu@ h||&9MLk4`)tL1al0.V0hR&'3FAPV^ wпSq2hn1hv>8dyWT-:&WV;)/b@HRH>g1la =b\%IDWLJ9wXs#x%$uL#n]- !0!IlIMV|蓌 8 ۢ/Av`nߦ{z0l|MEs+2pn}L|^ћF,Z]Dw-`ԡLx-ͭX8QW3LaAV4f#_xpn)W(8P 7SPi+Y,}VSa[SAŠcyf\rb6yO(S*gmFEΔ 6x5HCH^$%w9!H8aF迀CE1Ah9DyF#b46O|U`)>,MLH|&F23.r5Ke"v"Hd)p$~H$gVhΤP$My&m}%f$s+C`KQq|dRh81r { 6!,0mJjdr69;Kty,%y D֎ MVj8Ü+Dƚ G)DdD`Hw r̊YZb~)|f\U&%&s֜ZtL&;y۸:X0YƳұHr#!ZARA ŒZa*4p'1UYx )6.t*[BcBsJ \¢3ΊDL`4J$myDR3B"|؄o0ٻ6+Wcük 0A`7_S)ϩ"ZfZ&[" ds"kTZ r)9K%RƷ ҌoM999Yr5RАwq>.%EAd=1 ;ʐe~X'׮!\W'zmRw2}itr t tl~xq#K}`jnۺ|xra_KytlX _m6=%jޕNjfI8ηGk"7\2ZW1]ROb QR5шdd Yw?2k1*>}'fu?ƮX|_DQLJ&"}.Kk[LpRˢچ~{6C/ORYV=vptZ-CWۡ\Z|t%+y]+k r5p`誢骢4HW/&++ *\+BW@+9t(둮^]IM l0tU*6htˁUEܑ^ ]DW\Kr(tUѪO+ZGz1t )ٸa].?/G&TH'Efմʐnao yG?]f%hʛYX 8~vuf˻;{j;3gj/ POo_Pb>胭;|cYM{"n_= iP`Eb0n(h5Ӈ?+J:>ySͭ]ySNmc0חț^;Ei&l v0M]TzFSQsh^`Fc XWv0t~<.ztenHk<\P誢?_\Q::K+)#<  #3:%us\I;۹ov_ |"b0&|r0~dӦ䧋 %кjQ16Z(F 6 jre˂Wtq6u:#x}dzI-sRI^wMk@'~UE"Ϲ\\mTȱZDeh_ccyܮ?y!֕z[N|oi6AЎ!>v^s:v1V1)aߤYt]FgsdfKb!ż~֒oLD)ih7B(,{oF+"ʽQ}Dp%FʆMftY`ojhܣ~KOifJJmpzӶeh,u: ͨA^^éfrj D#ƅuք ~흜^o2Kt^ XWo6kP 1vj鰓B@y|WOO_0ڄL槫UԿE"xj軡w ooJn8w4|'u1?]4tN8+bRt GG OJ9rJ#9vIbw0}d)݊&6{Nׇ.iMncor?.fjŀWQF#E5+0ѩh2lrs g1h^2>$bH\8ÒqblD#g;{71%2`c.w!V^7+)͞!-Ս’ `9adH8QH.Kw*hD9Ir]9t{EeK(SRPEe@J() 99Xq> Ӓ0wB Όa,Bhk,*j!RRZY=&]_{5aH>ݍSGy20h` ڮAi&IHXH&! 7iF[b@ëkJtx2m*2 `6j2ӰBy5@#H~>\}_7Yc}"*Y+`I6rQgc"vf'^*rr(Eb9J ͊tArzwO>Nz2s,TG_ ϗR"]h2K%S}e:DBH`I$Xu?%INY(g!XN'5.FĒG! Vx'ռ"Mr3xL0VĴO` :מ"gn$Mʈ.)0` ^ Ec"]`bfG`> - ).I@E E3͠Cꂗ [qX%1iz5 %JBN *tЩv%nfA2hDc!a¤1t#W6 ]KHrW4:%?C R,Hź^,Sfz6 S*!>ED2 z'yZ%=@/^ Rt [jU,*I&3UR9|VE #, #҄:@k+>GrdPEό{^d-Ll XX#tB@Zٯ%(!.'@!6CA!9d !LVYP`=OBB2U0 RWBE(XqXD@0 p$u^H$*w!b҄ -ᦎ& @ !X4^G 9-2Z@5Fb@B "eP깏Ch.^y1#5MchBq<#7Cu&A9tM;Żbqh:{F&J!?Gx8!wGzn@7~B,2ɴc`Gvy3 !HƶD=#Z4/`zX+3{}x4=9 4 3[ HmR? 
~,Ko.#4)e CB #!B|> m(308 mmQ½g?'_6/y{{PهILZ@ uxB7Q,hF[C# >5;S >5}fQ(5xmdMٺoCFhxC =HYxkv:0#O˃M2  SA& tA}@K{\> H}@R> H}@R> H}@R> H}@R> H}@R> H}@ aϾ&ۧlo> ȥzZ}@, DPXH}@R> H}@R> H}@R> H}@R> H}@R> H}@RJ@ 7sxA%h> ّ> H}@R> H}@R> H}@R> H}@R> H}@R> H}@ 9+[u*9نQHԲ+$*w ZH}@R> H}@R> H}@R> H}@R> H}@R> H}@R^ߍsul5Ro7׏[믷ݏpy~_;<nm-1c[\ؖߩxEؖPD)QWM=\[Rz[:D%Gq%_"8jp%rs5zkJǕtNq@\]Eb4Vu\Z.>QqH\wVmNϤl}uٞ/6B;jqcBϫs4T)NnfE_e3@yᓅ(Aڙo~![2ҿ𚿭._y' o4uSBC*H7_?;:1moIs@ٶqp)4;3*re)>~B%=j'*>X"[cr5ljM\::R\-Wۙz6Gy)7bA]IEpUBWdLkCz{z逧yʹ~75dkte Շw?ݓNr>b;7LCnrLCmT:DDLgN5%m=QȥX D-hW_ => Ws<4f 6jrY 3pWz8= 3׃+qJF[:Devi"\99%S D-q%*KGq$hb)l5ւ+QDeLG2*•N\ji]N%C[g>`l~5;sn~^U5[rsܿW%*hl޾9Qh,\Mː]?0oH?f#:ׇ-<^ hF&7w]Z&f}||r$Z)?6[S>;[gsM; X ! ՂiQ닏*E.ANtl,pC͒톛4g7Oè\JVEV@t5KJfW -W2q T 8W Dn0Jy*sV\-W<W}RRW6q%*3++v9AΡ\ȵJA&cj {:MᝫiR&ibV`ZlRoTW-j˖~gߌ N[oݿr4_,s=Ł~Z2Q_4Y`FsE W"V%jbQuykJ|.2v,0}N͜r'"LCgS En&xLʬ^"7kڅ R=IjDm Ugctp\¡'Ԓ9oJW/UV\="\AS*/\\rJ2+QYڙ']E@\C'~XNq@\[jDp')Ղ+%S|t%*Q\-W)0>KSX~+˱\Xܕ^q@\1GJ5%m=S"W3.j)WR箖s':( C0j !8fj?E-ʐu\y Jz&S!}<Ε+Qu2uJDaƤsk9.{Ɍr#Xb `ZjE-1-*KO1$![k+;SD.Zp%jx\ʨBpe׈Qc-q5[a+Hjrl\Қ/n^zHKs[jp%rW6qqbL>elM5MJRѕ%rX z+JF.W2%q7&U+C59ׂ+.+Qj;MG^0`>{̧ VavZnBdwVKf^=-mq;콾x?~u>Gym3k(ۀ!ݔow-&ئ?Ew>WmOskފTݭHBDpra1y,/|Hsin^׋ΘK`SPM:mI8;ߞcGsm=KCoVo_=:z5' zO}Sl4:Ї_U3~ui8z?|Fw}qRO0iMC]#i/އOڌw{hg?7t=>XxB_ذ>޼8k7y۶ukNVt t~jģ ҏfDt[q[(:B"9o^pd_*h OͳJ=?\;;C}v$&ʘ{7=ﹸ~>v5ĝgM51~n77G"%H6G4犂8m5A䒩fV:_z'*.1-1Kh^ƥm%[Z&^\ɲ\~ƥ;S1VLbr"l-5QY1@Lw!#Π7gD}g#U[Rpw*;/S Lvrvޓ||LƈO'ʜ^;KppvΓ{1p}]c<;kEnt |p`}1z-꟦I{֠g\6/_[&ec;1قG>fO&2weH~X}Fw0`f֍A\oP)g9:ejzNS P7'rfu螛B屰$W2gQ]IaWUD4 ֒)<{s{[h+nQ 1z" >o7?[@38f>s6FbgJT6xA @8#:B ؉|NnĚMZsSo Iujd?;eE(%W(6R%5I[IF])RIӘI0D>5)~Z7pN֦-D,jRjUҌ YyzȜ&T:X5ʰQ2 1 ðu(` ZT֙D6IVBp 眩m-yWe_VN#(c73}QdR;(|| >0N ʼn¦ADhpN´On X; W;@+qCe&`.qmd=+,ps bв_(9JHM=Z2j`&muң uWaq_*U^+"X%奙]ZIcrK:a552ޣMov4֗Eqw4\F!;P`%pUoʢ5?a/DRk&Qr?ֻ_h.~ItAԙ)Ú N-Kt: ON WX{Vutآ4mKWNF0eb{*6O+?n ӤG4 N.%ӴEɆM4+)z~bK<sv \rejheK1j(3qN@WlSEO#\Е{CW]+@)@WHW&Vѕ]Y,Z!NW]] ]1b=+N*p.W R]+kG ]5BWRe7tejӕE@WHW]+]`EoUƦteQ!vut%Ov XЕ{CWu߻(ٰDRaާ& uѕKzCW-]+.4o*]Y7+teQtute$ϯe/J(*=y܆iEh69дE:(pp>4KPJk۫Zj(M t:UsfT {S#]YJ-NWet]Q.%]YWQh:]YtutŸ8be7ޕEKeʢ]]"]qA%=+ XЕhb"'teQr2ҕ&bIJHqnĕLm l>)6QA7&RU%B*Õ)@},TIJA}!WhftK"aq%h-6_45L^x¸aI>,oU+ hZ(3FB)$[(JJ=9a!EYdFt9ppۮGU ]Y@I2kپӬ()z+Io;6m8UKHy-eRvD]*zGt + ѾЕE+hʢr +l?te h}Dq*hȧOj q`%ׅu>}^Vt<G7E@"fUj"4^H8}8_J.CЊ{B Br NԶh K `?o~Mw0$Lwd(Ze3ZTFڤ(4O77B2vڠ$dN4q%yX NA- =pVC巕S`vΧi8z큺4*sNEt_ZnGK4_:d.S449vݎ/aG)Orb{<>e̺<ö 8wOG̙x}KQzޖUw%|p;I"1^-V]\UkO Gq<^}_]/~eOnSOvv_[xm7oW|gW׫~,d÷fku]&z+nh08_{OЙս`R;:뛻JR)F2zm W0Ow+݇dw}#ft__`~يk--&b%re{u}5Z־]X8Yl@/_NƱ /f3$s;̯0B Ѹ#m ?gu=46}OَkŤm;U3JfYJKX/fk9Z.9&'h4Md"+d7'v2dGr6GEBj9Dͮt>S$Qo02#sNdGNۻ`hkt88MGrA~n0.O%/Q \]zw =Lv|IH:b߂pw6W` =(x4h:L'\EׯxKgk G66Қ_$xGw4w0InߏX7#7)AUBQq:FR&a~8h?W".~BmeM6R qs"ڋ:]N8힉dP=e?  /@=XgX+\2"# ME2 aˌҕ+W~ Sz;ޔN5Hk-9 (Y,ҟi$.Gq} quP2E<,?tD`pcLh)J9|?d$0c:E86ZxZ-E6qku_8 FKZְXXJ>EH{T0Sd VABPNƖRrjJ8)γzK0?{Qb-n^wDZEV4^έE9퟉54t7Q/iռ<ŤUnꀩLJmڲ _~M޾ԙnξLs%{$")B<R!r{yhj fW|S))ip3fsvy6< 'yZ),95yv9^R'_d<F3/Rf˴6hm4{9<;lL,$oYQJ8e-2ALww ػ nmV߹zBNR*Xgg)2^_?LQ=<=Az_u%0{nyi#u}/JMqo/N%0.<-ӶqH$L#"17QH$]*xZ~^: epB㆞S-ZҺRa&cIS**QN(TNLv~9^73YXw)PQ) QӦh &(.|a 9Sl63.i5fV& G#:el]i HE3}#S3{ Q1F#zc>j3nP9 i ~ni6ԉiG! Td`l6g(<ؾ]tkX8J{˦͑VƳNn L[N|0t_slh\Z>o='X3F!T="2L(q+. h, μYط r9p#-15ݧԚ}ތfV])}ieL{ |&)lXNgc Ȥ ΀,IR <>^,#2?nDHϰ'LS"4@3Lb|-?c' |dtt.NF)&rCLVn8VL>9m8x}U𰫵(VmRHVVFn`} -v1~e=:.u m6h"nǂܐ(;u#ZN݇dҙKʝL,[1~Q)TX6Hg_b6x7DHn X/0ꕯqCd6ϿjK4EqQEgNJU6?Z(by H iRK%娊~OƦbϓ/v)+?,4<Eu5g 8}^Gwa-IO_o8U 3뗻 [Nߍnlߐ~JCh6'Yd˒8,f&'Hfp]><[l8Ƴhm`ZFͨq)ߪQ|He\q`:w"7tƄ "?CWsYN͏Q(Vw]򛬾PNZ= zK0 pKO%%/]%PM4e{P)ei\a:MyJYY)0g&˸$VYlz= p@r0`N? 
A~e9k!8o+ش\Ǫh``6ց)+\'BسBsg+=+VAL,ljDD (4l'/?{ Iђq`V ow,eV tP,tk ݲheM hb/﬊+h\sxEZIy~^qVA["M0E[Z qz\ ^,1UVәkC,4-?$^F[VиvՂxX!ߥ{\kKTǧ^h h=a'9<2 :aVG/-)1WY"\A!e:f_!W.H0`3 _PJ~LtPhV94n2;f>erX 0b{M~)R:Rǔ a~:sCsRRq΅H9OPel7͂A֕ $&jۚ7-ψ u!4Vyk\F -Uzld,p<~Y`\ .Kl{'AA5')̓~%Rgy Ӄ4.>Js!̺34ُ < v}|?]m5BJߒX RXv<$qC,<;846:I>*l&s? 6TK19o}&pvvóo*(o&tZ OSD$2G?G0{[T}{G`y\hgdlQafmQ@QRs2QjsyQ+ZԠ8yYBe0R6a V9{%Ŝ_SNHj8SDp0vF8iC(y8-_b@Jsa*T,օu؆64#a<X=׷"f4~2aCVR#[uؐ!a6@p-D@qiF9Vh:˕S9ez)&ޓ`9\V5A'K߯^we!78ugvUZu\P"5i`pеd9ř [^EnHHHi$9HEDHJR 9GW;C% v?8A;)86^DdDځO ]{k CP<XWߍҍ^hOUsJHx[h2x8TP;w˱4Y.?ϋ(ױr*Tiy>X]_M&dF*`vY4j=x{<-ZB &eG"xŅt؞ԗ*_ SޟR#8gNAXa-YL}7\@a6ˀ~u,k2~$}%o F_کN)BdX坿AYqب!aۇM?~˩֬hq,Ō?CVhi'fQXhM x 6@3.s7NeTk59-=?[ƹlPJBѴ?$FI{Zگ:7qWkXpq_7Ah#F\ !im9Jr;3F~HUwoA+a1[FQݗ^R<&6 NjLBiA$w4>J dv7 *Th_g+;aVV2.O3]q$ŋp '^F|]6Hy:$Q ޶)pr.FaX-czZp6C?^x[eZx"5*`lr/Gb2xZ& b%AʬoFlh!V|R_u0<r2V;ts8 A玽Fu+ }jAeFTXWd ɷ?Aψ`? (ϧ55O`FD]Jn7#3cbXb9ܝ,aZ'cS4iFpҎڐ&$ŻH=c6I IkZ5~~egs@JliB45xM0,Xd=z(.BY|Y5Y )օO "+JQOuV[Ok)s$dP-(WY[exhx;*Cs+bLU.(s 8^4nJ`5)[*2r-.DIL1D"e&D 5)#J(dU!t4YMō3[CGGS,+"єmRݼUeq*҂MĈÊcBg;㏬ ֠w-5WIdUn4ZIV!e! JxG llhۘ3בT%\]&p$noS8OU2ͅ oz$f*Ώ{Åri}^ 7Rns.ۛ ih@ 3D&Ofy<97+#i4-Wvc_}? ~je[6Fu0Q1xi 6w[&<)"v(`\:bWzQV$`HYL2L{޷xu&]<[g}h`n9(q$}Z.NIoFGi-8nEְF2frn( FGTwdW91+éh Z9b8.[ce#X߮, xp kji8g1|\˦Yc0k…)X#M\,kT6 l%mӢQ-8PExw. Wqhe.cgE [p̦.;zu[+׌)ܷQo<]&"r9.k&0ƭ؍ֹ:cXiKM*Ќ d.9ͯyQ2 35׆| ϻfO>nuQ0Ea].?~W,JGp6F B(+BB|k}Ra^dP~Ƣ Lu~<&<_2Zj%*.`Ko_߿e/*}y _~͘(FSsV7D! < gxco87_ Yje&"I`?&H2DZ(M$Qy5Pyhw|dJPk F# RE2*h|YO`,U=X adcj6s{&({<q)b .ID1Q}i\&{Ql&lV4afqM ,J*(XkdNq.4L )*1Ɇb;4 #uΆ'JձNe ZoNaXEhVGXLOqec0ˇW>[3ʪ0b-ҙ$%Xvkƿg0YJX/[#,hX-޶R -.'ٻFrU$}h @ vvMQڲdXJGY,!vK#w/ >#ITHJHlyggtyS_ך9'1r!s.JJ&=8w u&evhsL;yuw\ftsע}4Z58_ Tjo-axC8Baxfq4NFyuQӺVIυ[ėϴ nm0.ַ,<~N6?azb$Dӗ$1)wniShVВSqSM}Oiqes8t?¹q[~{Id,(v#+(p?F@jZ~iE b/Ç|wWNŋ;A>#4pT`|])(4gwܿ??c*N!3 K!fX+y1x!׻7$8BXH-&J ֗z#^ϓqWE}<LX7,$aǧ|(NxzgR_} ePj׃9k?x|é/Bdռ51d)Yeϫ*pEHVZ(4^*,Tshn[X1V Ə~A-GxBV nc} 1m8yՋZR;tgPgwFx52qj/F,@#̊C0u7)KL2`2 xfC?M$z) (Af?(\_*84a)4Pnk\AG& סwa_‚<D.1'"\A>ȥ٧lQ+scn"xM8a$#s%Qp98b 'j 3 ASB׃_i6G -b.|Hs, woL. }WTŵ$:ǂ]P Z4 WٿݕF孏:>*+z{ eRL8o`66c i93vW]4v uEKXkhio'oä@1c,Sm샕Y8&0|ZLpszv&ٜӬbwbY]sX1+a;~k{At[(%xGsPa qSAkkK<^謁 5tA~{S S9dЫ~"i}5dLPev;! ?b^ Pq*ZIݖ $tM8' k-XC4w7@5F! MS<"s;Z~> m)t[8܃.A~UкJCgd]jkdǒsF;ðY5|f1!ϞFM歄fEycI~3DzqK3N{xS@[SP>Oƞkp%=k8spzf%FiE0M^C{ JpYtVzѻC$8oo4k,C WNB BŒE l;c|q'`.iIL5sH ]]nKx-hunɺQaB+bf U%e4~-ވƿJнolJĦٷg)4=RV|F9(% Bqtbj3O/C[G)Ъ ؇e1DbǖL..GScC5a]%tk57Gʁ8X5̿Vq>kU5ϟj02H<)giTAcT' QR}y))4K6aNl\ۡQX8fu_lȘdxPHq#&dnXq Sid_,10ZJ6FǽezInL^3#ųy4ZTP>5Ēؤ{ yOw-~MF8,2I1, 3 X3rR)Zi}OK Ok{z<5 _bOd4"NoVvP$YM㝫Qէu$WKw:n߱}-{!ߧP5*IJ {GS2$U'~Qڟ|&'ζhq1 %a{wAS*h,8쮠h\Z'b?OJLMDUyfՈh.Y MS`9V{;\3ǚ:s{w1v6O]Ć{$ 6d]Ve t?_=l~>cB1L/;eo7Ñ$S~ҔQ b^Yk2Z;ެƚm uO%PӰO^2a8 bա}E? bҋrr0 #X1,V@_k9KNGtnizd=ʘVp&mbpѡ{=|7P.[GP]%TʥV.[K%OL$FұnQKJξaԚ8IY7na3r_{+TDn_WmloQv MGc0=~.Υ6 HjJ1e0-SNf̖׀`t{zĪi8% >9GQ`>#if11D{8|KD[Ņ?BpfQ͸eΘގ^ӃIy(=( 1EYrX8/B,b! xrhuQMnc4wKa)J ;F4AX6Tқv03%0)h:1pK2gN s恞y,9")8_Gɠk:W)4QiXr;,ϢĊB^f9 pe@3_bLȯ삾B o(V]߫kOf !#|/mUG=#@Ǹk,&*c&(5hg൬{͟f(YSBL+v^[ ¯wUl3i=^fJ g*2L>v ^ SlW-FNzQ"@mVW7R[ÍW{(s7j5k,;f̕g ϸukw;<a-X Cq'*h8^Y竽UpK se^5P=jY4ČL똧AD;ܞ́(Wr zYO˞]9EUrXCpdgZl7d1 %`3"|. m\kClJ[42UY)& _6Jؕ\w~ &I$/ě咯d"Z.J"CKwpDFq N؛XG\0Rʰ@j*>V" T#qQvZR~ثLѽEX(Ta`_(⒈L +ƗZ'sB{dA݌~UM8\SV{kXϐ Wb|dI"PHTJq$YBINY4cn!. 
A#{F¼!Lv񏣣[,2vW)N`3vkvFôF)XXA9i|k1agFӇourKpC9*Fȑy<&C4[]gJsV8#&FnLIj}Or3 M-rG\2'PPfkXҨBKMwL?~++Ml94c,IҧEf5i`(Xd"/Xl&mrXuowI!qb0f ӆnWS@vxYO&= }6z4ap;e6}ư"ρ;5Y8SF|)/[y= B0f0Lp^o=AKȦ?=c)~U nz8/>ýih%Sax|JUj$=0F&{OIe-s؏kf( NdދOМXAQHBP<9;|Acr'71{+; \l7p#hgMo1>{ߥV:]Yo#G+;Smy`YinB Zѯ#LrymR:JiR`Q@j]N*g;+֖=.o./Y ɵ mP$:v9ػhol߼=B%>fȘ ݕ=.a V qF&jů/uv%A:L淓lUѳ\ϥ.EeJhrq؀Q<} x'ohqMxLf ^g6ڬoܗ(¢QEtiaꇂ π$ !)bʉm%'ᮀ_V Oe2bjZyS欔t-jvnK\u 'ᥔz˞b~}+TxeH gWw񏑆3 om~`dòk el-k74A yY÷EE #LbPc1ၙcBX'rd y?*zE'r"ǚvx7_X)꺰Xw *+2 FA]U.tPz #ogoSP' gqqp:q^%1/=ȳK\&g *s)*L-x:','iHZ xi jOьN"'gύCs84HCW3;n*8񡍌!K Fw Zܞl}k54f#5 wM[՗=!T(T@ 22~K'~ǟ>|x|zoqKJ 24DspffBu3 xxtE X~~cDf,/((Ka(CX̉dpDLld~=/%|bc 4`$eO MTJuZm[~̖Jf:MF\oh;1g:TF\)?8iΝ+ʀ?-~tjvlФeI9db b>>/W*uʦ+ &Ggy2j4cQ,GPKS3x` (QgdmmqfÐ=g66]7J6wb`9,2tqPArU<%7KSBf;} $"C4cX15D;ӐI <*V@8e``̶">wb \y˹%XOEK/ 5_hU+ˣn5N Sӡ 7/͙yVi螆mD~*ȉ s_wG2X§em(dս0Xj Ҍ8H8~Hff5!cTjFO*Eu)8VJ‘.3̷gMey50ёt¸ fX*C5@,_jDa 2mcS-i<~w{57;и! \ròAzw=wVl=JҨ>%8G5ɧ\ oQ%4iZIHX=wNɖ,i(F\|%ڒ,T扻Md*>P,j"Pm)&&㫻6İx/D xhzJvDOh cm$9IG!+ ĴӗCw}5)x|LunN5鰶ƆĎ,H񍿋,Hf!xa2qde)Ov_rY:PU$[OL+F wJhUV-;Go}>}ZٖdtfB }}ܚ|6ǂk#TCٛugר)_%4~W5<1zl ir%j>rtx+I~ˬlmxf `Ct23qU\,U2;qFc'ʀlWd(ݦ΁!bcs"NyA>0ҞȄu.WΤ:$DP'/#o7^Tc(r7Jg+x2a"+h1ʻ}OQOUDV` *뺄e[vu$ Gr^_1Y PQ=b5FDď-m`Ԛ HdDmo,mPW%:IF9E;DE=Dwun]vS+/+oI%з Ҩ¢-q;:a\LY92-BLnb)6BH rb{ڒƓ$W]\+215-fbToG9+%]OZF q&Bzao|@kŔQoin5L;ib@8o]\N} 3/p6J.1p-v re6sK'+%9L1e1 Ә,1AX'rcӏ #GE9mA=\bJ |-Zg՟4)>,Z#fLH\i7þ{ҖV*{YQ,tW $mq%4mf%Iz@0'Q.>Hw* 1s >B\=AhT @vR'uCRUߡTLMUMY)|_U RaM6\gKɷ peJh ]!XF PQM%4nZo4Q UQB]ph+1etgWbE5kk>"bpyjwբ<5m)oTXG"{hUB8BPcZP7>7rZv]*,m|lR}t|‘ -x!0k2pɶ||z?V& 樳KCp:%4,};[[Lkj{$x G35WKإz`&ĬI _(ǜ`{4FX$4N8: Ĥ2tN:͘M R*w7(h O`? n6 >Տ\- ӈ <{8 lIq?cء1~z{-j\# 2Лp.TZ|! ~p V^`Gvt<4Xo__OU#7@G!j}`O|ū|Ic{] cPel1aM&>9 xr?2 ?0(Dl:m»7φ˟̃;}y >NYS=?ÿé7BA\9 n~?;6B_2aق8$ү&N":oxz+P.ͅ01ub la{D0yl͎#$_zOV{`)V!D4S?|<>a`}cқQ~Cxhg>ǽ<}t "ÒEav31 +.)v|IhƅU@yϒ |c.7?4v%ی;]U W 4<5{W 6,D!V TX;ȇSƙ kh`iH!"[SeL :S3cD]x~6G^b'<\NcǴVIN(W6mNŤbocNBM/.KoHf(TMkӴSB0ŽBmV([DhdV`Kj3pCe'{ r:3wO@Y'qb-1rٴ\V$˪wM[2 }q?nILOJm!zf6X&S]ܿY>UҎQ=eU&CaM7˼!lcHJ*'LkRVYINcM7`k#x3P%馳'<:" @@s=i4|E} [5j 䦅QbM{> U"mQlމ|LH6O2|P?Ikh}8RP(B59fw}>]+9o694nFzjE Ѵq5v@S/B cR$ /*\ѣ* 1H}PQHL,+'?.]@R' c&1x 2DzKϨ&1J,# s0r%Y@4_9jdVpx>QY"5[:!vV7Nӗ"=nÎ|MҍMAYj`"XPMN*'1\(-!8U'uaOS *(3ΑzYf3hM]Ko9+OP+Ȉn,`g洘_Je7Xԫxk2 FG1CqLY -("~oЦA>w2@kO-y(&c$y2vM URN9M,Ǒ-9jҡHT;]W2#+gu'Ȩ{lO>-5o`}V HSAdpFuE $ywr 1u:JV֕MuuIq2aୟjU- SܪI[_o:̪9<,!$Tv`;DIeBRkREbe )Kt uQdi@*a1߭;6\ M)C$t xPT-lri {)+K Ab$*ȴ+.kI1F}# à ]55@x%EeM#ƍdn2UO6V;D+\&: R>i*Kˠ#Bk`(ca(w N%r1{C:Lꦱ/Y*z x%[177c5F6ӗ j؜P:fcf $x-.=u]]\VЛgܕ3htu0M5B9js*[+On\;u)G[wL>BtWu7qbάw^1434^la' E`}vQVH ǐے9;ȺZ_)I7HTM{:v8^;0E T{0ѢFHlcbfF*Ɯ:ۉ9~_.썪\d7J-4Xǚ].'bXjX%@+Ikhؖ#+;qۜפ:T$t-o߯i7%YOZx#\,uSg݃'x[=%vN1O~t%USޫ\[: 9{F{y_NA?NzE4bzw)-f~w3ϐO*Z33!'p=:ZZ+źF5Ho3e寶 ޴4Si]߀AfPMݖ1^gmJ}˭/Dzi+?6r]yZo 6wҮ޽;bxHwg rj,Nj vzN|^YLA5*[SJ!'>Ayuj l\DQs^g];nusQ]e~_|Tl\?+cr 2闽5B|wM9\Z)J99Sk0̢p1CQky`j\ K$M]^8;s=r[xƷpP ;.lRl㵴Р#jb;*Lp5N+j=#il3F?.3K״vVydhV7x31.::|/}V4>|w5HhHƖ1?(݀sNÈ"Pᠫf(߱$F^&^Yڛ1~20ԫw<oU+cx9zB!\|n2Ɓ]=FcxQvi d-| ;PZА1QbĖ1n<8g4/nc {s_3|Nzc- 'HnHc2[|R}k8 ?!-ko]cT鼃Al[Uh[:8"NHX"`HBiCq3m2\2H<2s!c)RMՈ=(Y@k#?J$7 3jEeժĖ;_:d; ݯ{i3S> Ws8Y贪m!H#t6.֐1Vs[kޒYO;CƼt])e.FlAo[x pcZBۋ9rotg*_:ͿvcHGyvdS=݉hӜZOmR/l_|ѠISȖ(j $J $D?쐏h -UhW;en+vj81}Hd8\6[O t,xH)%R*nѰGRqn]XCF}`}_tGd|l}'l$oΚ zx~]w7*_zk8ubD¦. 
b]Evs:[&n֮ӟߐvawF A{mE5uj#aaci:ZEk19q) f>@(9|F*98U#dXO}5?V9GʯVWT [ R`,Aˤˤ?~\l~MAmL ZIdt\}󟿿+UWMw͚sϬ3%7}SAkG¿.bٴQHdV IgNjUbtf]{,OlU'3a֣6|z>οRp|˯6avtΏVY9}hTrQ/yu8A^ȳa~Rk%3zorAD-)b\־Գ>tvTo V\};YoկR2m Kꥰt~ه7r?`}v(r.1Ӳ%"u7(=L+c#X0uČ}=m `eT@[/aD1Ƶ@B/MnA5kpYO!Eݯt u$h:itEm/3jc5DmXjUX/q]P裡Po"P貕,TfƩ:/v!aP5иD0y:5(/ZsH+8Y3y΁;`z M[0Von!dXFY9?ySodm @ I/cW| (ڄQ[:[;x5`ArZ.^ewL2nm ?}aPY|_W CT׉ˮ^EtB (4C U'HY)dm%NZ ɑlJTd@V6pk?Yn뼏c'NunEE@W]v!f\CHRF%Y6UѬM2Ȃ6hPBg)w~)c˰هր@G<wP3Mm)w>fM}s6Ux:{D9kсn@XVV dͱb8mZJUKozt noZK*3Zmi4{A/[?N#j [o?. Ug} B,XiPڧi~ؕ euPNgZu'uhA no tjt @u=[<l| a88rny.Ў4I͜e͜ ڙK#A@P搠4nÙEeng톛>E;Ȟ',"#lPF";/-1%P]0,޵u$"m=R2H؇AW[,ˢ8[MI$E2H%ԈPꫮ lH.bń0ɴxPr^|CzekItoNpB?ZY/dҐ_Ɗ)]!+Ctke x0[a,`U5UT>dH!,oQd_kf Rʲ},pw:lHA*V*?xӠ]&K-5D季RbVA Y\tbB n#aD; Z͚!W5("FUMQu&`eB\Rՙbù!CYA=Y6 k"$#2VoS:Ydy {tx-9XzUͱiU ܨ~tyv2^dkWuFwӮvT8+oy2 )\:dӢvHqmp Y1p3>CaZ]} ![džOfp2RWlT?^LG+:: sƣtB0ބ-j:!x@+-#&N+z6h>Ҁ뺅\ tC=LSjEL~! BW^λ臅 N9O֣sfKs }GJí_Z/TTkh1KED# 9e-N[|y_{??k-_ȆdOW.H O>+ ws2k⺻^Q߅mӥG&>~4izL]w_p?knT|?Xǫ㇐@K[@zrsw/3ޞ˧z)f0ɻN?}lim6S<2XVѯ;N#hVk={!2;^BH} X1CkEμ;o-pԪItr (%:O_?= B=|BG6c:ڒE+t-U;gҜ,n2k%Lk؎>z|lgvڄ)LqZŻ ɑ{<(w3l)؎%ӱT* 1PAl"QEg{^/ ʈS0ZʚPg=lP}b3:S (y<0Q~=iiJ*8cfh zĈ$ a^MR 9ňᡋ+V\TNW7G?ݻ#o>ɦ \#W_nΎQn}wssTDhF0T߱pqt?}ًVץ~Dܚ@ç:O} "YVݹb8;i}[["l;|/cT&+Ύf2Sݚ{6֔Z*٭;|0wi];#rf#_rp^h8,LBB3$9rO8V,P}BթC͚uB=Q^9 թ~kޕt+&֭} < vyzG e7 >c=CHfp{wsAܓ,0= 2|/܀Q;7xƵ ZNڞJEGecX G2ř|ľ_N Zz؜]b`]Ш%ث]qs.ɑ{{$W>(MK^#EyZX'Fmw/Wj`b"{kno#4Lb*gW:B,4π7t\JCϺYRs YUs$b in#"ZaѪF0m:]BȼXI$yW0_2n+gaϝ$Xƾ:,#6֘mZT$aOZirk#[y9*ƲqN1JPݱ^41*ۗj<ͫߧ9 WA޹dRnjp]]~R jp%n$#IA]5*ƄC?_18b'cvy8hf}v͋qˈ]ݗ=]\De5u'<^\x=Z:jF IxX+?z%qf21(l\Bu*iC*L(֦2"טV*"6W^uQPsxSjߚ Quc1|W# ּ?bƄ.+/#H+(HC*l[SӠ]&߮HC7j-4+Ȝpu 'GG%=^4*?bUj =3Iܳ[7"{sbȼ&Ŵʊۻ_>!p\]Є G < 1N.;1\f2][o\9r+ƼeY*c-lȖZ9E`+U,]'-l/D]+ehVu^xCoRtJoe}23Ѷ[ [X}M ڭ˞s 5L"U1ZZ,6 v k8g(!a,T _Fd7KItꔹ47=p 1[58(XZM\B-=Z*zFSj42\Qq2)~8hsh.q(D!$^0uyJ㮼F (,e_k22/뤈euOIs iLa$gYN$&>{8UK59{#jZ#ڛnN!Wd8 >gOĊ}|-e9F62/&68.2ш VAAUUL@B -xM4Z2NӜG?sG\>mMnGoV*l̺wh]!U w+(gk9"dsM`!4cKbř{?fa>ٌDoa_Bhbh6F]W鱲BG]͋O~!;w<蟳tWMj70kx><[?`R. 7n5]%yK` =3xTn4|Hv$xk!bۓw{|L¾$`A6t zR) uaL|k cU>b(85eMrJزc(^_un<':沨M pmaHdŸÜШsW@s sP?V93t]&U. -HwKSg1vKƂt7şQܟ¦G=eUޯz|y9cY| KX0b$ZQ TB}onGb4sj?g g:~ݢ*B KAPP_z?3OƏA'%ۿC;[](CvF^Z8)9ڜΕ󪏙VB篕z]BMS`P}u!RQQpC9;_lR}bg8 b]2 ]Ghq*evy^˻{^27\u͏?Y1Zl/ˁ^|f*wtb:d#{x)Ûu@(F.2:asna}AtЉbo8ÒUTF|3ú$Y*=jR,Xɼ?͘ [,:=u9h0/:XesKzC-<񙵁} 닗]3"VĞӛ;S6ldl,}Ny"k`CNɃAKl[g\ӇNg}N{vztAa)X=jgCD{'n-WQ~~cPZj %R6tI…w"ǘ]1wgvdɂRBNco)<=wKgJM)X%oDzGC^q%J㮪k=eD9̢-xߎ3v |.cY]J8"[X*#ޟNPu|R}$3 MΉON׭O=d 1J! >~"e3KC.z["FP@H&R*U^4ƿ7bSٌۢ͑G #Ovwb܆*Ӛ!'DWIrR%TvqST}6SWkN̑J]Ÿ-"lncIA/>`[T:C v#֐ZNqs$,LbAHZ?чÊ/~i60n[_Z@u_(p=hNQ=}ѫIsxU:zYa#[7m!;듷UGsĥډK۵ﺁEl6;ˇ&Inm Equ$ e%w|Q_~vKu<׍,4սhYhY\77>mEĮV=n%F5#ՌlT3Qf6+V".,U'* kR0ډ,==,[ڙhc6jg ((x.^}mt~mpǾQis-y?vhKd0"*]Ϻ >I 뭓4h^1<ǐzI.aiteʛfhaJaKߏuه=GvW,aX0od}lZ#{"j_>YwW/ߒ-}uUѯ$G]9pSdwqZJ)Rk |eSRYZGHzbrQt_m&&P-rֵȁ*cUƙ9 K)!0)! G]zǾ+"F= v F kӠ)U_z)@]## XL1ZJJSS", {K5Zܻ[*8BQ=v6(@%W)5C4GH7z5&2L*/)7IQ*c&X"қ2eP%&{T! 
T)`C[;44$gZrQj/|[`Bi#aU 9N-%;璐o|QyW}ȗeQHX=ŇP6(hboqhan} صq9fzМhY0nq(>"d/mdBAE_-,ƖXKo ' 0edlrd2w(Iʚ1J)9(һYzV%W3~aTTPQdp,~" w>[X:1Cu IJRk2B \ed&r7JJJRSjŶs=;.7ۻw]4vA3яn&ed~P#^+>o1DpRJ'阗ϟ(%|4(o bR4uYPerI8%V#PKEc/e߆ )].ҺHU)nVw7]mE9ٖA9|h&BlUIZ 6.bs͇P8x}xuT8yrW7y͏?v00ɭT_>_]M5VռݶG !_FlU2\ZKɹXrcТdd.=rk$(P^KT+OXߔrngq>Dakƥ%~Ke $Y˨m^7K;z~ةBpQ[)$R2'4nPﬡ5r?p[BDqnx8[ֽ~=f,g tN!A.So޼31%̇9p[*eҮow 5nqf8NwquzK㯩<=P(\+i%`@P1rD.zbY, {[p}s+/q<2E][IG/!⾢_zmg=ʐ{wiIi/覉l+dٙ4aϙ3M@|/z~}PV!W9Ec*Ǽ2>pW9>vspA` %oʛ1<D1;>OFә= e@a1oRIjJ3,Ŭ$Xւ9^ߒRFZ]NcfLdP_cތU@x2^̢H{Z^aZe>xd(V<|~ˊ|*ά})%.> ?&ת0t%@/Blٺ1}_ֱu{r'[ Q5ǎ';bVu߹,ZvRB~kҝy/ز;{gd䝑 o5}g+uL}5KkҪã؇0DztUm7V~~!dGxMnKfX9YXO/;rqGJNk,QMIJLRYMmxfvv u~|󮛤$3b[FUa_ PlIMO;+8//´]vb*"hⴁD/(C)f NȾ'hZOh/'qkWԡ]vO> +(d Jյ\&0"Z5RyJϓ`KZk> l?~># ]} W?,DWi4I>FvZwjWzu}imSzFFn\<0liR )2Lвo\؈ ^lҔNQXSG3| 'x]:˾m=2[Cz#%7 5[7>zo.l7kISܖ'ӥM4Qc^иbjMTy y/mb$-"uFf:Rc0mA;{?hMEQWGum_% aw#>J D|xÈ2OٽݘOJKU9ϪX\wg~a3DZBS]Pug|\Yggvwz*aYO&,Fo^;&61"5 ʳGozɀ[R۷sC_LLҝ&aΘV3Buq7}="ixnluv&::nV,Xe9o~N*HԨ %ol)BiQ> Zs~NzHq [xm{3v3KHָgU/q5F(sz/Cd[ICf\Bcd0ֽĉS;? +ҫTtT:X׃[^6yn~]dl">hzchGFy:}3Xwpӌuȡ~>D_wXSMEl|dUZfup9U,^M)^עDx>P/GG$]쐔 j4 6T; +d:3'2[~.Z8ENTJ`:SK.QV D +IJocЙPEC΢N")bѹЄEmb C)kאu'c261cl `haq ͭ8*zVB;Suh {ƕ:iB(fE#Kn = ~9G]lZB-8hK.# zt1)ޘ#Ç:jF ] X B)ƘB`KNLVث J)X売)HA3YH ̨v0MģV'}5>6x%mQv.ɊlFY TDQU1Xs V̦XkA D+(Gi G\Ł1춌'H8k H2 l g!1 ZL0ٚ,E #쿚8;tޱPj5H̹V1A b.}WA F߇gFXZzƀ2mDl{Vb}fdYtlfiZ\0GJNt+ۋR>).gulZ.S~ DTεz=dzHm\x"8xl>2(̅E{5@rСݶ&t she22uhסݤ|cgx9LFAS'CGDld?]HegOTgMܝUO޷rN! ͯB'*LM1?-LC/1E8YlXr\h]Os FQ,1ZJ2r@V[lD۴%9*UJ j, *P.oN||v$T|av:*o %(&dEPPK-(5"l.bZVQpa}$iݓϑĽd=D!_G1f}犧Ģԉ㽿N߮gG XEDtȃ~ɻV,! k#aC3p4! u[XA,%cQ)z*[@tVE "\+DZX_K?B&RC17ǗO7$K^;}᳥ +WVI`tZ9:S04[SԄ<* TCdL,V6,Z"!BcocP6R1$*,HQEhoX\II~VJ<ŏ3n^{Rs&H (s%~0"\ct|vlY\SK1Xhh L8$Um#, {›% kb@BP*Vv wM`SQ]FJ#@]&gSYj<7)Ső)J*""i*b~c.dzʱ"<|nY,,fAf+$\9TJfyB㠋/1xդkRf2F%![=P W`J-#*w+-Dy&臃p "oF7S_ _3þ=/J[f5ѶbZHǽ{9$׬P_MX%7nZQM_a5Ra|i"+bИ+fb)tCEEHX|Y.bX/*/)5jcR6hbF_W_\mc*!x>&$CT9!J6VVng/}yf`8$ q/|2nt5.`%B'_'㳶^ѽ<?O]J쫐Atsuytahw6yLne׳ TGZAJW+GZgSB7_:' :B (&xmcRpHƏJQf=guXBe72:E#dg;`%Ȏ{/]{CD@NJ5x'X=TJM7mBtvw~:tmO hGUy,#i~΂7V^(0N~ς} z6T@Dyјe5;4Gչ(V&0y0|yI'>#]|Ik!p{OHG).,UMsҫOo&%击^]߭}_!?--7ܛCq)GIm/Ȱ3A&rc#'2POھr{-p/y e5u0װERmOt冢֛`96_hlݸI؛osXcv{s`_LKhg8w3z@9JTZ$UHX}t -whJvĜH@nZy'|TevyZd͓1o$гS[~!i+ 4}*Vo:ex$kY%pb_c ka,6&|Ŭ@ ƍ$b!] \䂀ײ =#Y9W ,PvUb [+x-޷v>, .T!+/_'˥iZ?d=ֿ)N , jЀU[rk*jCF[yaɡL[B%u;Wv94 iUT\׬˩.D.xVyd8WJB"S46ӕlr%Ľi9Uo +jBr تyu1Mˆ/cd卯jhJOLgc*[UPTXї @?V|$̦5#)}[ξr 7Գ>gvʊt}d\y$sGfDK(.,2>(nq<8LH6m0bΊO_5;ᚗ@[1vFlzd6 ̆',f^[u5/fy>X0=% :a&{d&C| D,Ł`SqXQ3ѳ8_NMIŁ8Ͼ:.z@!7x6U.ޔ1Xg\@XMV aspp͋9!γv="%ٹgG„p" b5l>:Rkk^HDQk FFG%ٳŀ8=-fύz@KS^ upW[gR!Z'9#C^2X<1!Lek^Ub劃@q,#%n͗X'$]4Bh]|Jk޻k^抺Q V90v3= *&l1㌒ jfUWt80tx l(pgQ۴1Kf#'5sNX1&&o3yM;qhgfȊ > HMnCjێک:H\vO͜Db`fN @p8Az(Avɠ[鸳qX/bS7|vLYi6uh$Zq]j߬y%Me( a:Lƃ;ŧ)Ĺ&s?ˋQKkLjAtxu.Z~7z,V7߫Ŋ#jj Ow@<&跾u]=V*=؞ x{\R<\oN#[_TF"BELAB;h¢:m$hm ]3G" InFn}_AkM_~9{usT?ncn~' O1T׷wckM_67X0e?xՍ>27Ǫ8 ^@JSZvUW*.!| ۢHDP6]Ť%8;_BP}xq9a W _͡I [/vwޘE-BB;DE9%\_]YIyXg@c[?;_mI~o$MdoM+"٧0'c ;:\v~zҋJ &^@ݔ+[?os޿#S6{(xbsLB2j)0V7Es`fF͛xV]w^؋#RXlBoԞZ2mB"2s]0 dw| ;{jl>})_.;p"6_ndӂc{ή Y?}|Y/}ίMqQ{+r(S^BFU#h\S ,`xp %xiR}xkdRXinVe, Tҹ9ڠ(؁EJծupx{5K钘/% fkB㬝Z=R@D{nB|`Je|Ǿybn'9ʓ4T͜\{+V@k!x#ɵOj'bs)OOEȈIC ui,9"yAJ]5Bdc}`K4FE=rS_rtauMdTȨguc6_7\} Z#зq/pkǿV>wu(ɳ|__EƟ)KOWmVfLYSwTa^ZAWcV\䂭6}̑y5/G⾨:B~jܨTPN*#:8/-nol0rQ:q*eidUnHmY&{N#v[]p^wmXدvtV4,T5p )rU#" |Z2;X`R$E@֘>l('ooÏ/Ywo Q\>]~x}=_~<+ yVd@YpeBe_W/,R;[ ʹTr!2x.ٔ3y)*{ QF:Xqa~ Q PAa?XӚ dU*ct}j6}:}ҩ%XXtU#OǠ=~:cх'?} ]bGӹ2P0T|$.:Rt` ];-q^a+,aqufl1'ap+á/AMwc1>b.=Z7ԇ|?QvGs:c5` i}+װGaYs?b~m1awfTҪ3#bI!FRK̬6}0  tW@[cgdTal=RemHo\*ђ[? 
}Jqb Ru' ,)ژK 5T֛0%$T*2y)}֏-p^'MyZ6OŪSz5cbNg:Sj!(Rs{d5mw%D?@βa?eחMy9a8i}[q͗*of\xc4NctѤqlK[ r galJ<7Iv5|gWQVP ,p[U3QUy5ABZ-:֛Q*YR=/{?zV|Njx 3 RӘ˱*h x70ꊝ1]:W y댚poN&@)^p$ت xjB B c++^ %LI$q]H@ \h9.4U[ւ@ɻsc\=Π i ĺbo$3)0pe #0'-ݽwW7Ysk'9I-ۖ[ky/SoS?Ɣrvh׷ }2 iѹ}Og7;{ʎ yx_x/MޖLY@4nc)2B#ʩΚPyփۗ~kp_yοK؈y`-ssni_-?Z\UxC_7q?m$V)N{)cdS/Č2!dU)2eϵ@Znc Ǫ,g6HICf*Wֆ|Y7q%4Ғ5[<ŚTc|s)gwtNo_9C@p6%T=$a[^ڎ_D|~߳mcQ-a>GJɍKntmH0b_FrK ՖLwPpR3K@&m Ha1Gx沑 y[ $JYVG@NKH*TT-1hnvD<|p2YXBhJO->Hv0in#}`#ىm..8?{FCb櫊ax7Lfg- 6N4؆d_e;zv"WzS-]e `Uc`iP+Nkw(>7Y_ nxH扻~i]d;ǜ갱^{r8!ը=~ak mG3`'XKҷj& % s4NHY6>$+r:iڬ(z.s׋Gs{teT^8\V}4(jES9XIjlц7wuƿ 5C_iB8/uPukB۠h^hu P5AT{c!:pLfA FgiVG6k%š@r; }P0FՓzlOkc6V*l˖`wa,=M54NjyDG, P +n5)'\p D ўW:\)ZV:)-%9[v!f[ƸT}wR0B퓥xJ-A;'悒b@ ju˯F;Q,HS0KEIc:r*A6? .DӅ8Ym1 0BRᡅ}V Dəu`h,}0fj ֽܾUȏu6 kW!_ qQ.!k Z?;/W Y-/ 1%#xk}!V&ԒrjϜNJ uU[BEs`-Tts~˰sIBEP<`˘gTvp,={HE'ѡp^@jG=!,O3YڵR2́1ll}vM:tm%돋Jty^|9h'BY22tBFX*JQJE(\tǹF O\ #9p37cAMq3n 鰶09s–v0)UCj׍o$tdOv$<а:?MdG"EũݯKQq4Ŝqs-C#քK,{+QCOVԚCF?Q.8n?w ˳T-> Fb}ΣdHrKkS LE>مoR" eVhGx̓\ŏ7 O\ul> fsI 7u52qzoȟ>÷?\ܼzGG)}Wм>:Qpq:;mJ{ 0\\ x|9s6uƎAn+AK CʚD1t.򈒌 ~ 4nd6,/ ьOǗTڄ0Q3j*0~ηD!9n/ځTJ~APYh0bX]Q)wҘx>~_bP)Q>JZYId~%Jz=]F9h;)åh=$jテ:Fc|-E_uDWRdC_OyQ:k,O">)Y MJ8A?"eX&Sy׋?1Y c?MD)uf?^2y+R!-| BUΫyu)%3Z-Hy y6on* ZUvlȆø })B EH2ݘK$J&aC&6Y{b>q[k/~S;s.rqr9>Ea{?Cwn}p y3 }_RLpcu wL]nI7vs~ӢY/.nvǛP刯ʪ} ċg&79ڏPϖwv$Ց!'Q>L}cGc %!. ۶ ͼޙF oꙷc%93>)_N]pD1W9QUbf;DF Š{5"g2:f[wB(: ƅ~<u0dt գbj'8γ@șq^'m M˫؆_&WW91 <6IA֎1]~4m5j?rt_M\MwyrҙY/0"l!lr0Os:Hv~%( ߴ=ZUN~^+FBPHUbMi*%-3wm~Y)I_3UD Hd Z\̧tr£o2˵\,Hyw5>DJ'3䉈06&Z `Sj,IR]]k}my_{Br&6WzAywCoCK=[᠋I&\*zF[a-qo`Mn#V۲7sU12lݩǟfi+[F۟Fd(o7RW񎻊5IeUL<\B#4[rW\Uur&4_86$bÜch@Q*iaEfq\|g+~sns~%%\3Q)1by{'FyCG<ēk:?dࣟcGn~頧}N-z~62ZtOhjyyO8p(H)J RwJ֝؎|F6%e9*jtXe4~9I4mx.CH).`Sb9Aޞp[`txÃcKB}s0g? zjιcsaIp9B&ԭA;Y۟z<_}n9e:O)>pgLՁyVQ6Kp,~'Y1 W5Hۛ~1M 4G,R=M;:.ׇdVeQL8jFoPP]̜#$e(חS9/ot|a2ÂcPv詻3=͟>}d~oDbILFJkt` ـ<;# s)8/Uq8T*Ú*mSVJL*"5 9\>F`4vOV PhqQb"H0#8UR3=$)pēmX%EK^EҊxr9c/'.ܸ/wӄ_Sͷ^G.8|fˇo]鍬g awf+N-Oࣇo.6zءb@O&POqU!(1gi[V,;6HB^a6nS`N%lN%*34r-f`1 Ec` Lְ*2f,ư#612iSLpd נeb5WLxnqth~i-{W|Y X΍Y5K!ZV\kTNPW& nA!O-+ΝGEqB aiBsusS9x <(w߼([Z{Wr?op1՘2?}k+~Uf6f/c>_6.`=`E Lw!UOVnxˋmD,R<Ws]^2INK?ͮ>\<>-[pEzR-kM<@k;Ҕ llES}k\:9Q(赣. J<̏pTǷTF_'h|N s鿙ϣ8(8y8Փ\j;OAquLMJ( B.|Ef:?+JR%OoozEH+)tEu/AT:  VgJdry)uyBz'mFsϓ!80C^/_??J%WŬPx])!PcQ?URߎ6T3ڌО.E(mXYz7C˪'K\[z\|8uAHlGs+4Jᄶt3@p/\+6[sד`EN?xpZ -!b{W 'ZvDk5%g@0a:tg$ٛGB %4챞]dSSzp`ydlŸUFJF0>)%}_?svJ-fxx9}Sg@ )Sh_!q#S24S2t ڥ18#՚*5)X@(y~uMa@\?R n+-Qe"gYv]-X=糞6m)xۊ*ijĻ]S7ݞSfD\Bb );&H!L9[>qFʸUk_v(H5#L 7)!"lSaD8. QKH-薛Ǟǝ-Ȕ-R`iՋ[ՂcU/{V ϱ{ɪmL jƨ}IĄ&Cj*7`+?2^y]L*w鯤vѨcW"q<@XO@IA]^LW_/W/^WJA 7RGt"8 jaTԙaxp¡5jFy=z7ֶ˯ɎJK&g#jwӔo hst)4WxΪ¶sSRgBLhQI+= s eccҺVtfeLV&SWSj6&.)ㄝOL\*M:X9 D5%%tfc'Hۗi52Pi5k=ܔ@+:.HTlI\|Zp]#ه]ȇlC]㚰~2{ZOoK /&ǔ&UF\sՏk + $I 35[nǔ3h*%qEqՂ W'wϿ'EPaF, X*Tt^ce07Ҁ9nMӇbO,{D tjϾF8*c'3-TW}Dl% Jyqj5Ϟ"7Hs.9o\ PhJ^2EW-k J*a:鬚ΜQ) aBn'gduEE*`jfekTL*Q-'zmdۑF);5kKІW _Gy"xc@v,Zg9|y# ךR O(eXIO jeAfGG%Sj/?0'gdg_hL1Vcxzdt>֟po{8 |?Kfku#(a=:\G&h pXp1vs`z>94;sh&V] ]݊g]KWiȾ Ym: u9-`fPР0c=/բ; TAnbfuO*5w(vvW%ϫnTʛG9l=ըUFf ?╷2y#6+\o0ӷlOosps5ǂm{Y}u'[h^,n=?X#YoJ):6j:я%0q@[TsGQR#D/|P[>BK)َ۩Lba̕x9T"Shj-DDT !Р& )34l+*EVQO\%on1&}vSWBd#0.LIL fHQ0#UcVa c#"ea!VAz#f&g Js ;;EDRG&V,it5}q8~bVP+9k ,@1 ,^/+z )R4

B WK@JR!za3cAub ÍW 4 2JE,9m"cbfh+{=kV]8$Xcd!{8زArM\QH<rTj^%6=ja-`t!(wTkudwT z岽䱞D'Z1''q1!'пp-\7wWt]9lprggRmf`w63f='lf6sYylFmVͬ%-C6ȌS`aZ!Lk5lJ<l!=)Amy;f(! LCUODwӾymW vT6Yxh]G.Z^$]{Î M&V* < 8B9YjS5bZlj媡\ƼvޒV;yՍ}wcvvN17RTQ'?vU6+ J~ŞMFj6wՎ Fng|~WncY2bvG(+Ďv-vtLm*uM]p/x\Ɣmeo&1WӢdiSImA`%:Z_ l T6ݪmVamd85a w>ES *h|aG\ZAOBQz}黂?a]lt;;ldԡP20F?Y80;cIT)gR% )Ok'~^atӦ;簔{a&(6-`I q?D)(x( !恕bX#᷶W@{UPvl'T I*yepqz`f|2ƗgƓ_q4|6l3,ٻ_ ^@&ޯ>}xI@'w{]%! ZD@K#ff;}(+PXp1N9S0ƃds>b8]Oμ^^ ;aK /<(`ED駡1%ñ8FPbʂqH[RSEr/JPm/TszX 7}W;R W$TBH h8PQ Dc䅙Da]BEjk1m!#-Б}69wΘZ'7Za[D+P TfXWU+;Hش{YIG0DGAxQ$(" ՙR, g5S=ȅR]59>؇0w_@H9oµu[?n沄H(bpL5B"!H1+"v 6=0Z+aWW}]$$tKG+Ꮱ!<=dWK=_xYfnk0qvcgP@}WRB7h8I9IkuEV*3&Ll ?䢾R=>bgbUɪqdhqTb,KAb" U)ͦJaV2}nyc.+>}ԉ+p6,}jhs!LǠͶa6x@HGY)-qv4Cj\E^JXeȑ;$G*-sI,>϶^h0|8($Wc~;3bcBAE[^p)j/{s%e3|7.cnP/[-Q)|jE`B8܄Fk(9*3DYvS畘IH^=Џ(DT./H"cm.pA/oKrv؀2lceڹ6Yו;jv%&#DX96;XRKFc5v@itjHm'WFYby[¾"_m\͇y+{̪,{"k %hKHDz4~pf0^G~jɠ?i+n,[lO[(~*{I.Og.MXm_;o>9y3cT؟v~X%4tQfӜRzںv翟9+?ə.&#["/g1&'˳fWfӗf,o(3}O? _ګEj'X?}43sm|y׿O{gZQڽt~=`0hsP9nSh01=pDM_5ܸ>󿾽[|z6o\wlEf7+r^tƘwՎ}Ez:_N&ɿb0,í|bnϦ˿|:a4WBӟQ^ kQ<߾JdWao*0IAwǤfjD&/k48Mјs]>P3^ ūNt4(c!]n:&oFw,zt=o_ǯQV"2{̾>YU>p>S-pd8Wލ.&Im9_ht3.K̤%U^h,}9e3m܆BF RgɮtA ҝ|A3U1H8T~ >PIL%giK `B=`5nд I1)GLKi"yDV%,X V`B:j!I1ƀIb(Di$n*aL?3CARj 2Kx1ł-*h%-lClۿYl`K[-/vU9FH+)B*@T &VMy-) d.<*Gy9ĸ,0c'4u`'?>8idasbϞm{(0̉l~&o )*X3kcaOu 9K!OJ$Y8V""cLFLXJdDDïz:~NaC(6{8mJ1hmOþCjܠ?n@L/@ͭy@7wyyC ,#0 Y>fovJf*0_>V:Hm͛]̕P t*J{B }7$kR71ZjCA3iWǿLpWvkÄe;Bfn:/I mjw J~T0OW_j/2LX0:ce8*F^e3o0<6%},K.H kʭlgU&BX7"llA\X5^,!dTAzRߙsn#p9&Y8V]>ՠ46mgJ7㚻g8඗e/>Sqɣ˦FC˛4xx>B\HO <0nܛ>ns;|L0D;r/;XPqTjP$NX ;p/s. #.@vNkk"ž !Q`\V!+Dz@.PbU: m/&DXIQ*(~{ՉQR\.{_?X\t(ӻPv KV1c،93%ʨ5"TN1Dp6F T)4crZD+lċ`Q.bmȫ^8 5a#*1a+>*nǛR9c u7f懨ŠjpA6ns_Q 6_,xykTbѭvި-F`-xKԤ}:>I!$Z1PAu.Y-Ii,JIEFm/c#Y[_SړJ7efSn6efSn6eMns:iG*pFj8уl2Ri`6HϔsMdYp鍪&\Qt% $H"_Ip w< g 1kder ԅ ҀIruKi2zMڶ2)m:'rl5y͝diEg=Ғf5mg/eF$^#8 qRI$1N"=oP 1Ǜ,jnSxm>WQWTuA9kr$p>抰t<YH4yz3P4R`ѱ&O? H"AR*O1TI4 oIDa^5,܀&#BJzƕk| 0β@1y$#IDJfR)Ո+ޡSJTL6[nfkσY(4CYzO)FMoQ'RIj?^]}ff1 }8Cǫ7sy!`&!>NS7cg#-GӫF̔ 6b:zjI_]},{y2*UY5VpO0}ΊZ)UsFFj4v+(Euۡ6e}xt,Nڌ*ݿzz+. 
&⁷훧O>|ADdgsF{H{}l.J%WW;C3}>?ќ<>˜r'a H DL+Ԛ Q=|bE?09mIutTDjςdJr- _|pOS ZfSEfNnfu4RAHkjA&唉Vґj2[ӑj6\,:w#㴉ZG!Hm]!(e Z$h%6c0`)Mn{ 6V5,cVak&~Ji6W Fy$+./3k13}Lсc&%= `ȥ,9NU,x`dp3 ӠTHRj6fVi]=+ߝqr/bWt ~v~m@@)>kMB߽ KCuL*6iӡg/M>qm$K:QfL77 [[*5[Jnı$X)pQr+ۥE4#eC$8)i.ui'OJ\ k)h,͢)AK֣@sq$G!?*%ٽ:T(^ ]ccj1s{=ݛkhTy;V9MQy)Q8Z~O䅔MujHW;rHK2%!iQg 6]&fԵ7&jI69̏Rl NJ yծ>CMd8g\<)KoDZZ>[aIJ*p/opxefB_ ޗPg{1Df$ HD^Pʌ0BdtOc6X,Dl b3~klUU2_I%0Zr%wXIW87?̿Lތ~}('0YcW`GZS/V.},~ob7Gx Z0O}8j8ۡ*>T-zST{NcIZ[Z{ lmG0QSӊu֙SScn]:Bm~(nK.2"y8DVFx*t4(Ý#V:8xKYAwˀ U -w XsZN05'՗qhHGDAHMˮ%pG\N[ &,PPA߄tk3̴RҎ+0 ]2AJDCXw̏;Xh;eC̓&ow\c!S|t)ïC_3;r籒l ƛiKuKXi ]g-…n{!lK dBH%\Dk-jykPDoQI#_}P!g`,m# yLV'J*$;/*B`: o$$M^TOm+GA|߸N&!xZ10l >iۤoM/p(C²1RLQ,{ soD-¢%FF31 z?$jܠ?nzoSR%^ӁnzzB j3I62 R{6Ӂ3āQ`Ϙ t$lOgNÑRK#He,5Dx9)^Q0`g}˷t\'\u;H4e X(4ʱ$WrT)in6U"[l!so&lBD 0iX왷)Qf@V-@48a0b1MwA6.c3{uu\z>u2*ٯc7!KZ&ZKWyCsS;.tj 5=ma*, IC {Pt D`TFs7؏BTU xȼ"5JDMk$aJ) }& |œ(#véʓֶl :K1|:f_pC-@d9Ԇ~Æa (q /gapcPt?^$)LyAq_pu~o3.JJl:N*uu]dx) ϣ(ͽ&Fg3w_d/t[d~6&gw iBJAy-悥^*y]܇{xtQw Q[ H:*,+1]n#ǒ_.0@I/;w0ŀ۔(T/'(%.bJL4RKDV #+uXEd n'W$p9["džd.lXĒ5a<ͣ;a1=;OIgeB iÞ" IΤ}<3zFM;0)"+}I 'klhoC§,ǀ)%)8Vjt?E*O0>`lGiNfosteh]æyώ~:_)+;y$P]]BWBu[;13PNdE%= vU"O"ď t$g Jj;yHwہ2zsꜼ^i"c԰hR% @32(摆![JWEK-Ձ"}ʂbi&8bX9LtyMQKMj3velw=M??s]ۃ8+>T)4pH70DP\u0"b@qo .(>*,P'`l Bv{pԳ1ΏAhFe B1.鲯F'ىTEZ ?*EtJ^%:b`sU/fܺiM#LKY}h )GTǏ0DG 0]?w{Mx?q6_5zsHT\q܌{ p)YhƓY|z^|i1b^*& Lvo>7#?6=^_QW-LhNm*ƼRX,Oi3ae= {M48!XBce<D\ؠ"މE_yH ahc!%\ vR[C5%wUò_$Slc]},Ƿ` T]wFjȾԍ}˫GqA duz}650r6tMBzD5 u {5N Ok}>p3cBšL"Rr3 gTV;ϲCzkx~ k΃i.ĘGn-;@o][*@2 W#Zk1*ݵWkʆwIB^(9W2~|Es{L)=ٚͼZfgUβn Ϗɽwaj8yل-HKޓ}+߯%.ya|Ntp32K1M(`y +0Z,{vq`ZP{ظ ֜ JesƂhCZ:\,DAͫ_?>Id &lݤDC]j3N?Fn|[iY0Z;f, ߼ 7%nۇIyτ j?DjSj)05-UZC SI.&U (g{0@Pzz%b)q o!l?0ĘuDf*=quI]5q{Sdﮪ=ႚ[p'}(,ʅBiM &9hq,=dcW@ǟh140yͳɹj"w't\~Cؕg_U΀~A/u:Jf e<ɨ}2Q4J0#=ӻ^|sy337Ig@?:ʧhi(&+/'cK BDj]s5$GyZZSЬ¨( 0\ /\@L4j 1D1*D"F[ A5G)sh'qpV rXqYPQ8AM^1Ŀf';HG$ubı&}-*+YƋJ6XFELÙ>;ˣעrնʶwn}e-Jc=0LLՠcͬ v%;Y>59+. +6WSw]NurͣߪZkQNU -*ęBݺnp8~jHmdeW.btUZQp0 j,C2Ts QCVElq"/yZ+i5 m8φ%8! DBM ҦBfN|H1Bn%3:|Ie- ͥ&iϻ͉{9Zyqńsϲ '>;GR1\0M8d3.Tfn=#F< ts0hޣq"dY7z:^yYoF(CX~ Jg%!˄5בWJQbgͽ|G~1? pc}2N :{RƊtͺg[ IƑt>kӨW(uH~ݛAcTu~I9لu"N H׉l`Ԥ2OL3Vg9M]lLYeyI9P}~gY>xV|flPq fl6- omt3T61vY ӜyǢ|hqƾÛOMp#KEL LjǼ%<]a$eR:#N~~,2,I'PEآ{ RSEn6=Wb,v`0{URؚ20»%/_Xh[3PwIws+ҝ>g$s!CM(h!s ˃*)Ws9ur_Y_T2nya4).TE\yzw'K@lFl:qDO%G-y_j!t8 H[ ¢,Zx 5v=t:cIy^#Q=杩;Xw4CvBmJc<˃k;FU4% xHjYC+:[th}sP[.S9\+0B+`&Sk& gjgx]C6jr n}%S]Ǣ #+_nVk+f}q(( )%L &vyJ6-M2facІFe9S)#:"`8,lQB^6S~>H:q :ry݊+\ݒ._NB:!u31BcҴ/G`R}svB;4cR˞9HBu e&-ɮb\B.{_\b"(yylouȹáu׊㎕K|K;tFD|K;W^#OሒZ-iMmZoxf Fx8%#ƂAj-,0f 𓖆X-sk굓&fZL}xޓ[#"O,T_!IΤmW># ܄O/2[xWT|w j]s= f3[ k 2 zbdd8HpN'9.>p@Hֆ eJh6K4˫t]lATiW]LI4oaScX)GeP&cF=4 jGOfi)% Kd:"8v?wi9yf})+bbnxߤl4PlVo =S)A Xł b .th$REm\=Hv6[R!=>(U'd{8\*dBҥ Burrm$is$V 9}iŬoYcWjAźfV$sKR_tEwj#|A/:aۦTF+`  J[]j[PpN6ò]>uktSPG*2O)\RNY/jȳYr\=zejҀ+E3k Wâ:^GNע=z/g1&{uCUogu{`|+Au,sr/JXμ"mp_oFsW@(&\qIXK{^p{LE"/Ԟtwoj\au!/\EKtdYgרQ6/ۧ1<c۶X?'U[KJr|+}tKXӓpT.k)Oxw,& w՟X֭g?6~w=#|G䗟oV͟n٧wG).Ny[ιD~{yW2 ,'Ž~\>nSQʅ`@g:p4`hBo~gg \Q/p*eQr Tg;% Jh IR"tpiVQ;:㨵Ni] @krаJMDht.%5ù ,hO- `LEkC4Jf \b2xUD %x?][ڗ|4k̩cHw^n6gaܣB 9^k]jC[4@^Nr]:_3@Y֕4W!gX3@{C*' QYC9~TȆ,8^X7,iѾcp& _;]nf O gQ $$9s3J x"[FTI&gN JW2シi]- iRZ+B\78 H}*cDсh13z'9z0,.`L1,m/sW+{|Tk%ae1xQYP>/\Mg1{#fb@f&!(ϋ^UZg=<Pi)?4* ݬY_bhK']_K_(II/̾WJ"uI"5 }nf"iNj.&<fAx։bRvG; qU:<ͺҞ!$o^%9i{+CjnlWeYN Yrɋ(gW ezT~1k"P3ΐY5,ۻ&Y"5 8pAʬ pc,SU/xͯ Z<=Oc U C&s`WBE)`J"5i Pyw&g|'T>+ 1,'O͝^͹{$C֓O}gmNoRv.Yl1ƨYnFx;/;CY'6÷}y=Ocd2Ji,oN#j"^}_liB vFb1*ۄܔ/ 0{#f_IhИI'Meg'+O.*Y@(P*FJb_Ɋ|Q)@^6v΍P^kK7WAޘHz5qJaC95V2#rjB2@AW?b61_H~^ Q!˙GeC¡]д/nF~\RNJ:(h@SLR]Z2`ngaTE˃s? F/s]$ew)aA)=:cbS_?KXX?? 
~Z>%*~H)ătOj=$jR&őp. ŅC<QpYTm8 b8CEB"SN3%%/ӲDq vyx}\]F3ټq |iR+Bjp!2Fa|3 O!Ж8i.X˒ъnF%}LWaPR7n?kF^ub}.`1ڴ\vbK&9ǕeQp]QJcnS$4ʉ+R۞Œ0#tz%ŭS _j2kBl9xn#=uKaI")x^yx"I; L¦Hv oDn&ZO= [1^}n`ݺ[}; 9Socl1vZ5 N|*E@Cg,l)?pZQw,9K+ F%.ӛMWS룡lVca{>C?˴SR`5`(!^f2ԀcAgҠVdpyrHmo.)m*Y+mvqֆx+SXFkƹYg,zSQ܌Z^Zas)+,`}Ն<̱iY>ikfZJoP9)UK){pc&9 ^Y (4Zne>tG&3.cE|Vws`]g ,+$>ķϺ+xI9&Dq:;Dn?n~Lp>9N"@)젟s-8W᳹8- 0'A j1Ϳ C_q1jTXQ$yy5}(,cW5q h["AgwNETJ5sLPѝ55A5\uo*#p"픢>5r9Ы]j&P3:qrjtTRi9⦽`,ipC*LHUTpw?N EjTyRI'u\ή2+ 0uJ-ˈ&CK͜2A:0GEͧޒ?N>fW3R:IU};pUy̸ ncNn>ڠ|s\+EEO'<;y'ZxePA8IR2Eh2F5;:NRF )傆'<٨`c.QkB:9:6\J%31(K)8@`Z aIɸ̺1 cbn!Lo*$e>?Z\Թ/ugNgl~3S) ֤:>ļ FUS/|v4871vw4! 9,&|9d,`ǂ2d_ !CE )$A5)"\rLX G:b'L>@c13,`RܔPp2\A#%/JO"@B LCC~,$ %V$mqk?2:/^]꣟."NSB 11/??Ba6Y mߩ~0h÷'"9!(,o;wnUqk-^GAUsG~{yJ7Y믃j jhoSJ G ]X+ _M%I8o}qFxӄt7dɽlRl ?;9'8ԒRɤ1Κ)$C Ԙʇ`t#7oF/.5ɢ:FHmaX F+"wOn_AK拤\/T|xvmh%qRw12gru7"6cJJϦmoLx=4˿*j@HmiZǴh{f?;M'͗'&:ۼ$)Z e=4NOs%řH b+=\ӭ?}cDkѪ[Ij_l3o1Of#'*6i7ݗ ED*dzϼB/'[H[ulY 9iAv ?ƛ[n}| -{*D|?WKOus|$-q! DU{ީT+B,v5 =;7D@:Nu`mؖmǬo물֙آE - K?|֝0L ٞ۾ VmyuѶ^1˜.L̑,:hՅ;$F{JNHVN8# !Ja~G1߫J&B?-RQ&xgΖg唨ը5udn>s{o7 5 `W~`V^\N>Av9`$r ɤOB3Q˩Y(g98`Jg߯ð ' k&p5]}Nm;G0Be$*D DF O1*DЍ+ 8,h)1$$ݕJow%sS*"EZɑL# ?Ǯ 9Ȝ07#|ޛD-֨`.0bmU6K|u~G(@D`>:a,,7 t X[XZwX/(^}!80Ƕ@=trU >B],dߐ\W)~h4$׶B1Vì6U=fa-9xUHJ @Ye E*;X܍Fr09ySqQ]U1cp{8YJQ 3784aRt`_?I⺶nÁ1QnnV̠hHgBl4R Vfa+ #/gy zo-lBeey6ɟR>=ځ֫wfxܭV+1 z ˃^mCf\l5reC#3t),#2WF8ԠW|޲5*67?_njEp&w&]JK4մd ߌ2|}=7@݂<35`UV;i#iX >\(GT:e8j ,1nܣA)B{pE`бe]L>˻R$(%Z\bA8Ʌ($1Z 0'-FBnU0Cqib\b%%*͔K fϗ_.ֺ-SUgL ScjWY7h[+~'D'R:a*bΌ-&„( ܠ~J$f 'OD*buZRAhj~j J R6[ l< 0($!%`F`%5MZ Mع̂kb,1l'Fw3.1dz"6a4XYv<9i/^o몂J?>5ulDX#_Jq d=b0ADYk. ëLv[J1bQٽ`4֚08#o,W{91T"N҇ͱ1RRJX@jͷ~͠);${> !(XpɺosK65"KGJ"ya#IU9OůD S8 9?pa0lyy e$K4++yO=n& )Lr8؀jpEvN'$Sч+U dRXc w!a Pr|:d>Nŗԓ}PIg9ܹjD# /uG]s8,Odenv,cʹT}5R|!uI&` `NiG c=3[o_v1]#/ui^N)G?\öSs>>oo636-&s,0n)1a*cibK+)~CRY|HF_{oyZ8Q{ 10cV j~*[ݙLv]zvY]"Aj,LdF*#H&1APcHE$3AbccF biz(Z_FQ\p86@HF0"kLl108GVeU$UH+ݩ V 1,7*2T" Hk(Zd%8$mEN 9;ULXm(q F`'X$  ! 6 D< DbIc&4oա1U&8+J>c @u;z DHȨB * ; !#Ԡ ; ,1dGtOmptTЙZ ՞Ǘ$;Sf(ʖZY_!t}'μS$1/HLQ4=S1M)$eK ki{96JAƒW\ E,E-jjkR/}Ћ'_?cN«mW֊.Tjw+K)䋴PIQpdZ:Yz9Vv;:L)U+yZMK1S:_K {ioQf ;dG nv>j|6"ƠeKe \fqv.G#6Ӗ\{X.%ZEuHƫ'M@@+ST๗^S\AdǓ9B͗+u^Vv:uB+Rw}`/I$Ѓaהt(dB4MYud5.ʥy%2"G~ҋK>|`'D}FE$jwCDmP8GI.q{{ݺFîzVm 4ll e2h>c`|yL7R,~{3ȶ^cpLtmI %{LdX Ŷȏg~ȥ)f$"Ђs E:͐ڢs[sUÀֱ<"{8u8Ш .Ft[LQ}b8@)U辤_\uBP术]NSy[XǶƥ!F5*DU'.RF;Uc+VU_s /M!;EJCU8.O5e#yui0RĻ_jʣ`>Xe*jG(kQ= 8SW<('X _S&XWk> lD {XAߌga{7]ؼroMM⟂.@<:wjgpOt%g-Y%6,#b3oAfq=N2@^λt3HۣslAƗDOF= %61*qϑ΍~)y~}>d3F Ό b玨mֲ#X=ji4mizNBt%8Qr*~hcX9F\ly|{ji4:6 qN#=ښsF:C0Di$sؼ#7jiNm3X;C`ta\U :DAWEFABk%p' &`&=eV”cዖVv!!DSzm,LX{rH=&Co<8]iJT:̐czRmkCmaWSa>fhcxm.XW?\jkB4Ҽ6Z)$(0JW %NՇҋ6"*(klvQjTG4"{U}Q)^wQjiu^Ij1V7JGX%cE Rr6TP[40N("w R,dQK>jCdE8 !1|#INQk S FZsuCnQcX5.;k~q1Ǹ anATLj2Pk-AS #&4q=޿ o5߄vz73t7h6Lmf0A9qR|u>Oi6w\{4޴vG2j|;˃m`kf9G䱎8SE8A ZIyDqC!Ak3kj ~hG=~fc0ADA9h , Ӊf^jiRMm$vO5 $)Kr.D×e&.I$AݘvNp0)Y.7? 
;'UCىyOh |✑%qd ob@b[bWo=-|pܩ8{oZ!{uȲ$ULJ}331IR%J !g 2 FJF^dG  `ᩢ4nGɭRvӧb`u 1mCKF7DLAVxryyW6<"DDpA;f(9 N(e)f6ƚH g)nIpJ=-X)g`~%}Jn0A^p2VSՊT;& pєs)A2̈ĥ DƆaS~1?0,g"Xd@ \sh %:(DZG"+iƩ)+4yY-ˑS4E.)FB1AETj#A)c eJ"DI]"3y9-wM3$u3(˜LDbd)`\ax ,aCn?@=5ޜ 5ru'U Ϗw@GGvәjSOחς~}hطc/|CuƊ1V!j\-)VR"fhFB.wHa`ҲǿgO&KEU)Jȥߨp:[., Lº-=^"C8 ;~k΀Lqy֯i(K3|UM< Hln?vk,yp٢ZYRW :ЛajoqqZ08u:9a;s(L/U[וڟ~oR9(,Er!$9ܩ0>Yڙ1[Xٵ ` 51j zvZbH}7;e=١zНݑp7uΦ.8S0}>Ik֞٠Юp`ysD#ۋ9/0AV[4B5>z=gKgbdW%UkvJr!(%_޶ɹ=K\TV`* > ) 7HV(ާ/C+Lza *lF1Kh\=(zOv?G?L19.ؓGgi_ \7uֻy ^k|KZIFczėlڮc}KVwdvinϗ&/H^m\kR+j?kSOkWx#N;E`w~o-QaS"#RrLe&feJ3jmfڝEaB _c[Jju}L gX&nmgnB@VSxAJ(g1KK!{5 .!$MYLw1 UlfR92PiaGwN8sqBn3!°[QKx[ R>R W>M/Eo TƞwYdex)h♎V"38^ucwĝ$`vXA5K+5bne.*۝}v]gxN?YBeTtpeg'Ƴd䂃sk2QX%Ⱥ;f#d]U'[!&]ce@8 `+zImr+*GҟZη)n{⤢mgX7`m?krM辩g!xxo -NO[ ^a޵NJ:D=)P%W{Y bSSH~$_su^#^1G L<|n8$:!y%&?N=u뽒E3M3U>f-y#Lb\ϚR̫'\lNQ$&6!Raa &X DQHֶۺԺBBr zS],X7<A cv|D2̺ԺBBr )BO?ǹhX1#@ cl/of{LC m@Br zwB,'bha c#xϷpy`Bn;\Dd4)rqP,xS2n9 \ V她Wg Qoп/oFF宝q[rehڳ01f fWoŬ>֫{)yim}~I,WP!+dm AJ&x-[TfdZmJWT?!3!k좌ˎ37tɛ&]$bWZh즇cȕYON]OdmbIu5/y ,];9tv p\+u֞N>k)iG゜cchaX\A(GK'-ʌ3 )\q&wFtæp-W]}Pe*h%߉1˟J_>77B H5BJvz)kI3R!jMpHXv+Jl)RѰd<"[QnDbbwH:LQ~/q!Vf9^Z@$U(nTMrM o/D+~CDほO׳/c2JL};I 2fX3'cɴHqn'+@G eKq搰@K4I .5p) lٙTIKf@PS&u .,L%7R(kCi IJ0*⤹qJ;'ۈYs*nϩ9Yd䜊;TT=ű=EE{,7Pg|Vu動6ro'[sco~ ?)N0/bG.|hy1lS*`]uKEcvXp4YbT aANDuڰu `/|Ԭ aLl?{O6_awfے>b ӝA@b]8N(Z$J"E%(0bwիw{/>kM7nu'X=qq[9ByY] $ 7,;q$D5°; KvPl@ _Jϸ en/ %R6%tt8e &D4tRI\ }1 C "=#Ck*.M%g1`. *OF ֏l߄h"R^ܜ9KS9~*{#;N~RAe]9BgbAL!^tOj ^}8l[0xZ3;n4'פ"uooCB΂ۯ$wsXO6I`Ӻ]Yi<NC5i*adwRo6L}H*qKyQЂ UW )ryc~S X+9Yc03Y"bܨ&ERO{ɠn8| /%C40`H`fwCA9C#C682i,$ C@ vPI0Tç:pdy M*e}Lv.Fݏ*`fvj~ђ P]9wy]13jZ%0D(`rH,P=#e%c%ive]*~C;/v.mWw#˅^Qp`Fat1U@\ذcw$ dcu/& %{^`ˈDx5WSS$:˜$.8_]| 7pq؆_ձu3qi,ԈI)V T_D3 >|zA=l?†Σ:!CTa<6 7lkUGQ o9AհY=,RW^uJ[Q$9&MтlbN51O_<4G1AKqiuF@%t"--Ӓ\+[#ZCBKF* _a0LTDP<%6-[fz%1^6 :6 fH&JC-Q C$XSFXiL%R"TNj+^(M +F1ܠPX NKk+e"-Spi !& OҐ(V$JNc,T ,n#%#X*#D8m~^z(dBҀ F\bÂ77i2HK#n +IPӠ,s^zSIBjo,,;XAtK0$FR0eqV2Q=]L>&qh|Ir$WWSy H5trF9׀lDERŠyԘpUT}ȒHWƪ:,Bd>`vTT_#fW\sP#FFׇ^^-MP#AϏZ‡BOαe_(p;5D2kxb2]* 2,DWXt9" w2ؤSI;9;n%ڳxIAh5Y/ݴk7,'-V'5q%t.rR!9 &o; ~-7Z1;5]c$`OWH=|<$IEM6y2rbX܀Ɛ 0an.pP~Q -Qw5tC zf{O#/Я n8E{<`}s!(۬;EZ/䆋\zNW H*<9(i&`thf2tU3OX'{;ݦȿ6ָashӲNqdzK<ܠƑa2wD~L|X̰s9@ MOSfL]n, G,"{! ޤk [d`1\I`_HɊ1bm{ظ(H;g!>|qGce oY?5 uoĺٝMvgSgG0Cm,6GX3Op>Ϟr9Iǣ{$4]!Qŏ58ls(+|f>H_{x]ϱ;C#ԋFH*! 
P‘'`vfԀqwx)sR6׋,y-`A'_MyɚMz5S7 q<}(K%|Ma>ՁT2rʝ7sr|BH(*BƸ秢hy)tisКF^)ЎY.kF)r'or;;[z«2O#D*/F~ Fהp,|&$Dž vSS 3{7!G7q)x(%VjZ<iBf6yJ%5QJSr̢Yǣst/JQe<'WbCs ȅ{,nFm-"QXm6*WtB v[lbpx6uL&(\l SFEH=+H再xV31Z~n{} /XW#<?6n8 i*]0qD).ܙ #&]= R0__nM^LyC3݁㑡X B֕&HMlJz o, ¬U9ڥ, CD{maV@,5mEL5Orug;:yEJ֚g+[]550i[){˃)QOfa S YmYzq#Ԁ]IL?߰āFԾLhVۼ0㑠\Q*NI9"FbQBC[Ba\N~mT$!dUW%fH{bE'Yb8Ub1[ړ+o乬nb|x"#|/X(J0c-w `/fIUCLX V@}kFoj&4Y.8+/.o^,pi7/!K"{$2Y2tDCRNعn!ש)lBJT>Q^1t($LP, t+DU}wJ˸<^8w/hW_6o()iG{fOGN{:_j9rj+4 xG] d&T|.r%o~9Lsg`T$+GW-]ryfY%@wb3H} F!!̂Dȷ".:hgCՠ'Rx:ys`ZϮU] | 6mL*X;;L>2sQd-.t@.@e!Q;iМA!&wwSӋ|MZ\ݫETQQU}@$I @xIEoʝ[lm g-,$4~2^@xFmS7,~L hj+D|Xn)SS XI{k fTQ#k^Hl`\rUәcݎk!)Ἕf x+W8ۤnQ2Sdb:!3|$BmVque HkfM,꿥 G+P!0m#@ZZ*h1lRkfsG iqI.;H+e5\5媥l ~.#5 N/4_OHQ!hkC+W+[Z0o;%:{hnY TuG~s>5g݄u%֔&r%ˇUXlJt~Go-@UPfݬGmj%!m<~Cn]ȡ.jns4+x6;[X|BXŸGϫ,i﾿~5@e)r&IUJwMK3pC*B%mGqR'*/Hh-TP|R| EuR+աn4ؙhc@ґ K:)³(weE`e[J|퍃wtVyݕ]u<:D!qO2M&=rMk#GTE iA׆B2<C dOfyAoӠ<5fN*]-نsR.~~Xqw\tM,fvU֪;k~/_ÊTMJ,)OX+*Vġ\Zޏ ,4f:n`F-Pf9g&`6 띍Hf xv_~\6^9~zx-:{HR+trtʺ4]*ND.N5,UF'Q}x4ia27H>K ؆-ϑ m#fY я-X3O*A풵 ɹ8VYaiyWtOYS,{' Nf6pab}Hqļnϓg9y3'Q!hlzx3=|I#8yv>뮙s4mQaz_Hivf}|ʺ"z'}:r]Z%dhj}-G!OH?Gi/w9|y2q|5)RJӂ䀓1#S P 5W$6'~MQ{ $߫bq՛9nJm{=5)]0chb]H<Os)sKo˃oŧ-֛'8S 82B U$qH =] n{Bdp{~)T );ja]L9[|*S7?ۛٝ}9ljVГxTL#OP>g2N eZq ҚLrFi=n $9"6H -i $4’"H! )eHcg i@SYB9ݙo&}r.J٤~wWޠj wj[ۃ(Xku{uŜ;"i.# Δ1P ܪ`JhIqp0H*eD bl8W(C,˞SbsrZyM_Rm elp(i0 )\m h\rGǭ)܇' n-n6kf&"|@*4q Gл x4(G҂A$뷸njz` qdA/0:۩OVsV%xأvpq#bdd9k&2 vlh<(n(0%_.}, izlw?9tDKIkN!qB =dIBAR)>V5?jXskvy+g"1/t~f^TT>b|(5Ld݋?ST8aZN,W/0%Xiu-C.k 9Q>L0Kske@<*y)wq BH$BH-1,UB 5 w#˭|-Mp?L\hi-w}6Z[ptWv XCT~ś$(Jo sDc*QdH[PTz|=oYĀfLȠ$@Z*hF$Q%RĨؐ$tDh%ǂď yaUR*K8*)zND ĬPM,Oc,"0O!`wtJ=g`auԱY-` E$ƔK0+yG[j!=Eܘ4M4Wj1c@' 09E6=+*cCEFQ{8o!mX|*? jLOv9\\ bȄV8dF0"&Q0MpA{Aap&rf3d `~Xf_Oo\ |z74 2dP3+CɴSb&>\`LD4>|I/g/EnPJeFPJ:ӭb vo4R$-o#g^yg#B/\ .t 6cfOn 8kIӄ1nE!(Dp]q r#|UiWگ' ä!E6 1mtpďv is<&v"qzx%7Bq%8KuRu8]va\Pz̪Ho=QܶvBt!م w=)%qh6rHiOCbp]7b}4#JSu !@mA cÜ0*5sNlNstNd50*^-/{aS lbҹCG:aU2)%@B*DbK$8e=@& |x5Z c=lw]"fuUd%+3Cc '^^f IP-<|d!-eXde/t#"?Hq)+. DLISpY0|ږ!4cT@l Rh9xW/<:x!0Dٛ^0'\Z`oD f: ˈ @8 uV:&|=M7e* e43؂;)%]UN]ϺP*cZ#֙#]Ta A$*70x >$F$1(50U>ʟBXhB7pސc܀%b+tv0LrY!LA *bXϛu4gjkՃv]/6= ֵX c07i]6WŊ7'-\ ߭7sx[Ǣ(O9n<+FF娛a{QyJDUzvgtk 4LbzWUGrq$B1Vt&QޮB A'=" ]A!ͺ >@3n9y'EZsQI:puR~h/Z>Ưf<)0 ]٣?b5jI+| z9h)W g 1jXoRl({AFba*[f]fgf84wAul"IMp9aVpQ4yn'{*ZE urvFP="O7%l! )I/CAXnY^YrxV?\XFwBg.mG*]IE2Ps,b^֍w ~E]])#DNjHlۥ,@DTG]!Q9>-HVfAzZJ=MCN,==G 5ÃV!_*Vl}Jׇo\McU->:7.uw`_ 6fZv.>N1qYP䭷f ;G3܊^|Z,{?N@?zw!' G}s0󗳏$UǏ70_Cpk=VKk5 'l ݃?F|7x2~DdvApԿŦgw"C]϶.:&эKŇONwC9- *A7l=Vv5肭pSn:1ţx415axs*I X7Ha5 k,<@l ܬ*ULJW, u5^ɩE:5LDwvk yicho^k+~;iu5u?r7jҼGic[,wwC6KGb!掉tCWa)|^'MJS o-myqłg{7$Ǻ &,|M=+阐,e=:R~G²R3eH7B!v/f]X&6[)?ܿ H,WZ,(j8ti§uE2Ԥa+H\?+*#k؝XcEݧF9@=ԍ u7z7dk0B@mJsôq[pɇem^T5T4{vi۞8c5Մ7{4m=L<Ѯc3lXnj*o56^K>U{WzWO#8{Zk>Lޞ8E%UOQ Rt90a-=L8DTnk.1Hsd|RU%^wQZŝ^Pߍ.K"M4iԜ}U$]Y+vgWЉ-v Rn*'էȣ"ARC><nup;m7qn\z}tk~r#IOps E6SX}H'ŒO wpv1$Ei"CEvCE-9UhJuԐ=jBA"Mjqq?5OumhgACsfzһ{&^;:ARE]ڽڇ5WMGͰRe-n;~PtBkxjgY`N(4@u$8‚aiLxߢ;yC6oicj=7X40H!x5i[F[~8TuJR,D 6-t%;qkHRPY,{2Ȼ/dLqjg*_w~]+QD\ )qd˕[G!s؏e9\u ʖK~ ^/v;[h&uCR re#6814#V=_Ԋ&G )ek@!r9QYjώ:Wr9=hpbׄA){~v9[ #dbpD85%nm"HNKB6$jp$j8*=kB)tJnX|Z4Eܝ>7<7 Qtin|WɊ>K$&9 = 3h׆- 5A? z;F693ػq'i=^"E \(2=Sk?>48*O#WZ'AǜD!JmsWc7u{35oŠy0ѷlj@bfގm.G*CxF+,/-Aۛ9,^1w$*I|U$Uě2iP(5a]g,$R#smhD?j^nѫHUL~2}3E4Q@Gfy1/o~VSr5h6T89уݼjMi9}}a`Aa>mxT ]V??v6{7J\zpO/ZS0ZY/d:Y3_B\"ʋ[3//KV'W$r|:enuӔPB /'bvpQHX72` +u( ɺ@SôlP@CB&…X ;N<ڧ@xAހv6IJ]_⯋_^Ob]~5a~?kܬ@IKA!A w R;LIy w% ̽ *"c7PXNѿmujO0nhk&*laeVj=+4~|LC2"P/81a5Gq(1{}9 ђ]o=Z5H:! 
((jgrY  ˥*Hq(6A7Ļv NXx,LnDVwuA *JaXgl s.B08GҨEZi: 5dR.b?_bX0&+!\ߓf{T.չտ~ӹbPo NBV>iѮH vެ1.be2 -&"4KF)1k#'bPoA[H`x $`MAZwEXrRyҬEPp`0cL pl0p>H^?kgda .;U0pQ jfJ˂A @1 k-C@Y$gB.D.`҆`P@NvX HiDaՠDe ZڈYkŸG\v`֯ZH_W=h~3]N0/ F*|QA_\?u>||0[~[Fo#>͵Mg\h ~x8uX:/bXϼ\~ Z,4Z~fWֵ%pe3c>Jl?\_waO3VE^{:7aY!:C8FԽMc-:;)ڭxiŧiZE/vK!OEx({ >hX Nh(nwCn)8䉳hOɽښ5ż-:;)ڭ}֒\DjjY4$v AbPub8Eu€i-hX NhJVN6ݢZW'΢Q,V2!Jv:{n>foWm |K)"!o%v̤(ޕ5qc$w)싪k{RSI\qyIJ(dL߃&)6)l)aeY&s8Ls m^u/ޛŠo/]_@#Ӌ*-\,$W$P* 7ODV(ћO#x|wއQah-Mo1GYj, 5@ 3ypvgaC3e<Ujl`AY`O^ܤU^*F7r)BF? 'S#2;e:ar[QAfkgB%K0a"1)>r"2$H *CF+kENPPw/bBP:xFs%y7TxBS'bph t5Y(cSŠ FUX!t;| yj >1](1 %[̢owlbxt7 zfd14ɔ_W/ϻE/yO}YfxwՅVvv Z:ozхd/l0UA[LGm q6i[VAF^SC`|0zK.niSH\ᤸdC\ZUq%UaO 5Y$&"#Zrm1ƚE#>@|LMTW!h,DaKܙ4- cfU&kZZvg/FP.[-Zmkˋ#ez@{ h6`5.5~|/Qѧz Yk\7/_8dz~1I]I9 1(1-`5_sZk&2){삓imk ܭcť~ܙK+qMҖuP(lQH=RQ zq, Pn|7KQ/& Rq'ʶ׽ ciM~6B22o~zL1I#Oxm7ys f ä?%>+yB\¸@oa+n竷ﳐ%|2" OO.<+3&ϘB.S8@> y_jK&+eTSOTXMNC4 Cl pp`2xV >!^H&:jX :#JlB/u~"(N47ItKHSY=7a4_vHH V2Jœ%{י΀IivJh}m_V_ÔQNWuJ)C籐I C327},o^I0/O}~K)ԮH%Z{A([ r0A _p:)uR2,p8kK`=,32mH2BdLjه}t\˿5o .Wi}C>%Vvty3I)ؕ߆͝}_ eRUf]4mv}Jr5_ߩ`6 $]$L)@^=c*Tj%5QkfPڦGQ4X0Cq/>\:,5mP80_Nu8J}y~9כO4f4hP 5Q9)6TXcID(qfFk|S*j6d`T*~ba[DU2P0$TQ8!2Xa YGCJa,O#ly)l"E/m&'g$Lg$Ŧ'?/9^I(g{֤il]|;>Ujsçu*n/ >I6'"Yq[zS{?3y90ھք~a2]psfӊ~j/c4^`v?Aho׿`o"Mi 5!;1!>!zϕ傣'v;xxr^Opڞ)"_mNFT.} .6-|K=GIT_*sJ˜ɗɗ=9WO3?m| N ZJ)U7(}/*tl_]'K|T^z/r>tØ̑*׌?@jҁX`:>Gcyh6S$ Da.)QaVyNꮔF,@3AaS-z(gOWA8Jg("(loB䕶EŻ[C0H>Yy&fjΝh<G73s3[ځn[C'b%|{!!`j ǜW`I:7&.?﹞ &$WN)ly܀_m8St?`w7++:;( C ?II RsT#`з@PǂXRյqoax|5/c09.'moz(dkggu_p40kDz*j~Q]^>KvI^k]Mp 7t|5":?>zn]sJ1f!ΙNWV% Sbf2D!&:2k?Ai \qP9rh|Ow<W(M%9`jW\R1od*gb4hK,bp,X `eNY#čyLPbdAt_H0j19{ n23yIJޚ\UރwkqT5rzAJ׊ oSydPI-1 5:AM T$ 1#i ̣Nxds!U T9``GJ{ t/@c`1rLB.j$pQA"H !ցgAFx/vԛbw*/n:䉷Yn+Lzt -ۋ!i j1"&W w@]J(FHs,:=LhNc)UmZV++P0YX!RPC^> nkS"MExD(E\? 3 9ӌ3ɥ߹>//`N藍u[C%x.SL2˭*\:#d0Z-3hQ޳,#4Z2~qʙ;fB(-m i.|LooZhe{t1cze0KY>N5zKwWZK`Gn4yS}͕22Di3-˼&4H<.&pkYBJjJB4~?ͦ)r;Y.冟3  og-sC3_CRFQ|&8Ve=bTc]AZhwj~&~Bd߰"75x4kRmG_V'U|pVץiY3\uV.e:%JdK('G𩪰gUभ s^}9{RSVYϘqe)ChcRЄe;ش>{Z2BUzdZ֜0-2U\]p<~UC g}97RNJ։Q bMZRPуEw3|xq*L[f9|Cfqm>ciÌ0;1̓[evZñj3*jAKBbhVɪmH;T[ҦiȞri {ʄDJj5= f$̥SeJJk)զTtST꺗A[C/";RPܣu82[&)͡ɧFkΘ^ :a.=H@]M:4 @Q̉Y_C)ĪWΆ쑿jPùaRhD'UDUCIT#jTZ p D+ld71(`Kdmjjق̩.[?)霨an"* (E7i/u3 5_mB[SEDtc7AGBTNuy>Y3m(Н;u$sj n$TkHf/@R;\q'}@R [ B7@ Wh+w2H,U_fsu pK)ڰ|z߯rJ̤IW_ޮ?|S<|vqJFP'bfݟ%+~;?GtߒIgm;gg7j֏w|ඹ%VLVf!HZPs WjtۈL1S.Wǭ5M :O\T:9&X)5ׁ|s'7A %M>9 AP%ԠehN,ChHFi8/ eZt >LxT}:U V%FMojlS(d2_ogId-Sc*$0ob=?ĬpT6`fJIeR̭X wjzQڦBOB4zu| 5RU~z:p'C bԆɦ ^Dw kY #[:^ "pS,{jJ)`LnPD.q(ɞa1.К@&PP9 ۭIst]H)kOXA%q{l\UQHJ. p m/PATI4 "\x*u8TԠTTj@U$Wp Ѓڢ1F*4]Ld+5rTA型pC&ny$B 4Sq3nyE[At@T "q-L Xp2rAAmCnhs>Hjn ё7;BH@Jֻ_5o-D?cNnw1Iu.K!)`ʹ>@*ѲԵ835)l>n3?sFQ-`fƕ9^ì.?F{~ˠǭ7dt'/PvTJ{|nplɿ} =&|׿oPІ? ~ۊX/-Q9|B(Lwd{J$6d$ϙ7&pa[)1V4Wz̤{"WIekr>ք6-`ɶYs?y,sDJnxTqQMTMv6MEĎZ {&w^Uh {Ǖ/v ¨0 Dĩ֮@S+8'4$<ثᲣsc&xqNE'N)6뉱L,DYENJy*Tiۓ<-YVP#a!荓kRM r&!Ѹb 퇒S"4x*r:&t'SD[ 5։dJvm&(02BJL٘dgoB<6H>I YOht3ڕ)]dZo% l(,N OxTLI aa;eB+N c lRx Q];&-(ۏOv~dcAXg9j5=)0nA-7sh;.du;Pf@ ;-^B#(( sI+ B/$OROQgaS͇$!_IwuNT<yd@wd\۰F:3EXU%m Л~A}don>\^"rS8*[SlKiA%?7>Xػʡ}isM(nFa)$a_./zOyEe8n[mXh?,u#>DO[Bٻ`7H_< NzMk,kq`+ zO8v;8deHQא?YFۚe"Ճޓr)I8e %LnLMp` -L\!g3cNf16iںsd=sK~oxc#k@+i;5֌ho>_n?B!n|1O/bf, -7wƞ~(c/AGW,ÞF1lcoOξ)셋u' k ο{a\? 
mn{w2pˣ3qHoF4?Y<3A^L \{xo\iznpdi<{T:( *p MD.*Az13,'5%:vgIKFj*3#1\~_#cxg #.H{\+(v&,% 7S @Wpj @}P?5ţ5Mݖfm^xE hgʸR;4]c3UyšK5e,X^uIX^^-eo<ϯL2nd~ǟy]^-kk ~[wT{P䟔c1Dз*ޔ!= yֻ l,bZYum'NӿW64CBz݇x[a墎GhMXL)s1f[>y)((`/PE06`Rm לy{蚧 .p>h\yjW@p#ƕo9*oxs[ٶ>|'Mxf@2Ÿou3O*#8 gVr{r:2x9UU.M l~e &DSRԎU:0!b-Df[o"eZsx4p՘֘WI*Rm&l$`B )qjc -F VCiOFJW/lm1 DL V|z {gRn Pzt+ f!o!Wԝ by~IzxW$5l=L"p.|I/ۧU0 G9.ܝ ,_+UܠtnQÐs#oف|!;? %j_it%eqkZ(rjZuUMnƋՏq_{(cy?<Գ#]&rFAE@:kهnoaAپXPc{ἷo;o>{|BT7l~؎0cim1O#vܛySt]x>]<]}骧:)1xu[Y# )+[|o}p%Cț2 Z vԷϹ>l1*3 R)Xi]{#żT-L3F3epV+LSLZD=Z؉U^M 5h=߻:tzC+j*6>_~q,䮡J]j;ԍWT Gw}>ym㹼;@]_VJλlzT%Z+nM\.P)hn7'`ѦJ0EL.~O|4*&DQ5&io(o%ӇS@8c}{S[MXWscJtE'}$|RMx4l "';t7}\>*0g氯٧:Y}D#Y1^8O3wR: }}h#^*`U8@!R?y\"A7ᣜ"h)ziw+$I:˧A+CL-]а~-'FӎJ3.+4/ )vB+ZQ0t#!z^++pMժzW0€e( PoxbB>XJXp Xh 8 + 6y'Jpp2̛L0շO*E!`##ފh嵔g還o]Vp-nʿO&Rss3^v8fG)1t4w׳tq}ߖ&!]xJMZ[>= w 'K7ן'*]E%wcSL~S B"l/OmdR //??{ȍ_rd|&{ `ma-'3~Ŗd^6-3BZ⯊Ū"ꇣQ5-ٶgG]~/,w"5͖NַֆrseiF g26GV/s!/5[TLqFKc~ *+'SZB,D}δN܁&h$iZN%s;u UJWiEaQ!' hdfe~gvQI9E 7uVX,URRuէĂs(ULdhn#[TPv>ޘ!cj/-K&i„8P-?7AEkYv܍?ժӎ2IB+XRpwK*sn[Z.]^_5>q{׉l#aȜLbBf{XUkMʨ +Ɨ.Cf@}a*VY;p?ݎf%=%zIu5x%8w̏0cm{-8s;+Edݧ=mB=VYqaewU{>wu=K@(V.ˑ {4iF}/kiyLE鍇XjL% DRTlDlZ0٣aKbͪXn>+wܦkm q|}U6%Zj;^e(g&ODcu:K%V).'kHPDEX>w.It/wQh)ՓȿhD)NFsu~7.c1ѯ$1NraQF+{GMs.6HiQR;zw# FT( ܀9rչ5ۣQpǵ)|<:Ϗ4~D}вŠ.mv>3N-WD&6r~./tjr`Q'~NZ=hq/pqBG1/>Q~ vg mF34L4U rsIs^.>E%֜hY!$CbJ:DA2pCȣ-/ !@Z<{R%)E~[ؽ=7W0?گm؀@N bm쐲|8_mA~fO6w:lr"^}7QexǾ&aYU"Mӫa?ߗsΌ)FhfdLTβ^ ^Rhbi;:XVl̔$o2B.s2?98mE?yή1x\jL/&ާFf@qpcOdMp*jO kTGxSLVZCrᔌut-jwYzvFJ!{ʁ+1b=(L̨CmtL7qKdLKz  ɆŮ'N+ Y1Ƚb|7KzـS.K kvkuuV !cǽTZ]lm%cfK/n+7tKy>{vCn+qwzņwQO1a;?,r%nM. ~C+aG׍yڊ18XO-`xn}XV9Ɔ=I!V|ޟ-5anWԃ=DXY}>o۱ֈv^ C r50C [K.&LH%scuHZI;^ X $޶@Z5-ECO&jY`TZF6+^>#)U. eiXҨO@k"k42_J)QIKd&wӐ:9?"Nv2/iYfmpzJA9 Y-ו *reYb2˹'o)u[E35/N؟'Q5YFaٜiE'}NFɜfrO6k6f;d/c:;Mc痗Oayk@nͺpw(_ 9w2M/Ity8_ȆowRCuOXIgRȍWX Ku ZLUZu@J%թJ=,/)x7{1)E6W:ntOW;DSͱ~|ʞ\C V`!YE9 .A<<1OT ,@/ z%p\;e6K/BUݕڊd86wO[8͝,2DFg_aBƂ~Z\Qx)#kc`)]%#. 
&T _W҃lCA0[@vmoG77I&L B$w#yU 1qdQ0"x=bs,dO.J`VT3^;X/G&J"\&+Z)h0]0CNknO~ŢLZc~ōJǵϵ+Oa\;bu{]z=NI93V6[-q@զ0n)"\iIb\1e~vY6K^ Չ` h7`\i9difififiͨzdnHmg4:5;(Pq26޹% *#Q/U5P& ]E_lK+PT`uEl҅PZ})r_zCXC SA<9Xvd_;|VFR,ʪfDi$ "F^DQV3bd0B|!&WA&nϾ/=RIуk`M Yh{/ /IkbؚNƼB$Rv^n9fd`jlCLn^mNCmH,3Ń2x152q+SOg?\uط{qT`huGj:m061 &!&ZcYrU #wvl@bsyM'h岧qr}iMeNӫY%m}҅s"~Xb^H-֯mahT"K1XzthU j.Ta.U;ٽm ɶ?_ 7byF=vԍvL=-ɪlVM[h[i+k3wX 2f}8r+tا7j}lAB ]d;AZKk-(@)۞3D>9CMNv ;MF7"O'V޴VgֵVa#d,G+RxUUR,PꯔBctNIDxEiGHRઊ1II $ d+ţqzЕ yG̔}҃.ZH>EZy7+ 6H 4 &EG{3C5-?Ʌu6>h+&3Z8-S 29(F*T+fQ΄ϱ\Jg44\[y䋑?VI2 $V*afa"ϩؖiS"KY-Gl)aҞ w-#O%L[DcuLd;JZAʣdZKLj[ /-˄^4:iC _\6ON89R)˷{z$Z9{z2 \6:nIC\B􌦵՞0ia^^ Yh;1rlw1De6u93rEi{ɲX㢸پ`ȅŅa%8$ܞ[agwǛOb<§?p= Fc^0O[)&\!Cq_w$FvRz$AOhP;!}S&p}8[7FY;ɬ43kp(Hc1i=(t( ~\2MÇ E!2P#%BQ#rPnr.4|6@J^<+ӿ]lwL4"CaTAt{Tp.U&Ǿ~8Mv@m) %ygqýNÂTCDO0vPa $dᢖ lWqZNeWEol\>MVm6.dnťY7[Mu>ЎJ׉|ˇd^w&W6 S/fW3ɬV{%E}DA #flV[Ax>2\ёxF˟g"c陱A]c!x&CE㛌wҍ!˯e8Tg/ZQЁw{Y0;I䣰}9ov:BF-w*#7*9 VOKGIM1>y'u9hDC+$Jѳ3 s qRj@*Ȣ+d+"׉٠(6&$޵6v#be'@hbt0Lfv_h^-evEٲlK:rwȏ_XU,V%[^H8`{yj5_/-dJWoҞTBJjk*j'.e,`.sJ96q/U6J+,tu[PQ/DM G7V|,ԺtRE6c}N z(dT>N]aLxBx!!icc$^ݬ[;.7߈n/uk4RNⒾN*O~t{eeO(v!qOEd]Ĥ{eȢ*M-j+wOJA.9F Zxɿb5p}7y;X/h>4aO?~j89;4/~/onzbmLz ɠDdI p:v(S+Yx7&L:1O|Mi3e5Ekk֓{{v h %WrW= x_t?G,kb5 @Y۠!ҖÙbZdR0|հJ٧Z#(Ҧn濯|mPlj 8p; E.oΦ7>3T $H$kd)0G4<6:KFs&7JM$i zoU̮'~;08 x>p'Ɩ+\lFj0FagVVx4Xӝ)LxHVhЄ".D# a)hm&.i#Z0NoRZ͢FGXp8߉\5~HXxũwܛu!1lf-F-+hc 4wK&a]ҵ7N=6/gp"Jm*|ChEz3G~ZiS/kitH8Z&i ŦEb; .iZ*7h'^Ub(mtic ]q_g{JLvZ+CQ'V?64wT sOfN{<"MdS'1d8UBoaNlR̊UVv_]mvAnx|;ႆIJd| C{(/a3[Cj⼺nl: )yr'V|5ho;ux2[Z1"Vz8z=ZŔj4sh٥;$!Pyvv`= luno_!x0lܽ6[ӌRyOݶBZ= ʹY Ι)9VzīkCpԊֵԒʾM\džd|5u(9)X]y^ y#ksjYeR1Ek6Zֆ[LʣlϥՠC-Q<\A8$|aRC;L+b~S~8j\Rc08,@>9`4(Mn/;cvJR#"Uɉ:2ǎ c<m>an1.~yIrpa}|E`*[Hk5-/-‘l+d}- Pʚo`˄p=f"RoY\Qp;w$$v]mG.Bf8xSmݛ0_UTpAȪ#TS\w^)^14w~7(n3C@zX>>WU\U %`"R\t& H6f vیS)v[U#5]>" < 4j9=wWNy#-ѷ KP=ٍI9s{rv׶"H!ZSDDC#֌lcInMvh^7But79.CF?҅KX{jT9!c\1Qѭ&r_FmF6uT4cݷ0C9T7WПYLP[cO|q`3: &Q2;hn߿퇟m̗G?tkɨ@4QB {|{ u ֞k>Tɦ _bmF)kSNaB:oԝ.iVpq쥨+װ uqe*1{Wb"Q1׺mIN+&Cf ocueG0^P9Z˵\͠ࢧe笟HT"T8B$swMAه9ыλzi؍R+p5ԇ>o=^QFi/1e/ HM5闽غxjl#TkE I2EҴˮϕ4d>-8[z\hyjUGJŜJ>kxV -1A~-д2p(󚰌ٔ_Ƙe1fh^W JL2!S^*G^Zq):VmpX'5QL&% x m[(^0\?0!pc$U wApY}I1f |#cI &~ j Ny ĩ=~KmL*OJ_ku2]Y¨: }X_&|h)@ܯ:=~~m94hZ܋qefOFP  /H:;\&Ί]M_FaR9 XM"89XzOCֻ3m=%hβnq+kN@XVG%:> At[]_ysu{+JD7N*%y*AzǞO:5B>*ːH½Z%d|fqĸYbܬȸejDT%k5".RNS_0G 2`!Ӂ,D(2 ym GxC2 E"p!(S w(ZW 3}bLqIk#G epVm5L. u5$5J!VEϪCh(J(6f_ԥk='08 ʚ`<~Ϫa-j1甠fN>HxUp.YQE"d=!))=(ĖYSU"V4:2z"/GV"+f'>$OU+^dGG*ֱ.JriXI꿰 ~ ("7Ց̱CwTcE?C(`Ήp`Hjo`?s+r̅@`g KTTu ԡFO:1ZNN'1,q"u=(u FpCQHN(ExX R% I/34uTuT|yGCXBQE\)fhZ/ I艦¥x2sVwR5NF)V#/y3þCEf\bˉ@ ;v1 oJΪUUB=u:ZGtRNj)(Rc"vt- `Z8"#e^dX:T:CNgWolNBɇ_F~{m/xfoλWovϐ9Ogwgvlzn޾hzps@^w^ͽ0Q٧;ʋ,_w^w:?2ux~ϏMo~GgҘ3:KwY|>Jy noCvz.o9'H^^p>xvr:WqCϜ1<8<9fs7gr'9A2{I(\~E7RlCבqa5)$8vn#xJv/CBu7wz!.<; >fs[~lQ'tf$!1Bq~/|^92ۃxua y&;Ꞃ1 /N=W{'Wop/~7ҭ^ob3 Du4<9o2r9W`k~guSnm@ $;7~!>Mt3 5AFZT*n$.< nF'Gv@h6{Ca .~o:}BɢswGZtnW>^n,ӥF6QȪD'^s4Ac!9^r$t*X¯FbVh&4h&h&h .Q,R$y4:c@V8qAe  \ (uQgܨpfk{tWS0^8o8$L?̬{a'u/)ew #QiTQmH>0XPF{%J&F tt@3U_Zl(L'a_務Y&RE<X3dlh057fyc?'<6ۘYޘYޘg/hfR AuaFAF3RIQOz𒵕+Z2th#t06äpAK⊆$NlKay#8bZ ؀c,0J!)UdHV|iBZz'nWGR̄GDpEsS}&4HJwi,gLQtM*C3i|&Ϥ(&Q<\n۔J*6UXTIZ#(=:Y-,R-,ˊ@j*B #wJuW.>rjTI+R2a(HeIi2 h3h]&T >Qu&(N%E L ϫ& Tffv hgv hge[t.y-]<oDbeJЕcuyеRa W]A>Z22BA)]( Z2)H뜋Q% &j@uF ;T.UZin&^c`1)YiT+?*)k ak=񞂔E L@#'2c/tQ-kipϚ+^sPFSW1YmzKJVx'H93 Y$7D<_őU :`LOЫ^kM[k i)li@"͑R^II4kO6$٪، I`JEM%ea c1.g,Yɒ,IY'K)VbR0Z=:2K:A'ΔrS)E,M~`QΑ@{{c\kSG+WQNxó+S 1nK~@ _8䒥[ o;j#68ZKQI ۞IU+liKUߙoH0 AT GǾJwOzo,̓T1ʌ̔ޥ'nXˬOTAAךIQ-MYmMKCkó\k. 
=ҒH_Mq- UPڨ0k,\$Y" 45Ғ WsZ(hCNcLBxD@Qf QEbd޵u$ٿ"`w&~U #,MF/EO[^`VSEQtIe^avTv I UchkGGQXwSnϗ^ì;ٙ &w>Fe|Ȥx}7ؖ>m8n,>ϠcۖѺ`ra|5pT5=ώf,K4RեiHyOd;!ðs(Q^@IkFP|9q?BRȴ]%J`p43%JFN L*7(%H{;=Y?(7F-L_ߖo:›笝Ҏ`{l~jO%sN>P\Qǟ/vb9};GH>kgIᦆɁJYETZ U [P恇GO0 )si/*`l_y=爭6NZҺmn^,s8x«],;=Ig$;/vm[53Y?.No&uqk|Y\9`gJl{RQ39A9 5)EUEkW ک3Þd޵Df8w8w哏áááááw(=f7}u߅t7ՃsKNeϘ>S. x:_1kQ' rgdggDPk%AS$aƀydJ[J5@<J>]SpS;m2`jS;My}zv.=P3f0־`?Z546Ξ} "Tc Vk (mc+ՌUi)9K2KD7cv] `pw0 Y2hsV`];\E4q՝ H+(M%YvhD;o~@{  *q{"S{`ۮ6q3_Bh]'ӎͱ&6Z!.DJy bS@hW#z#b3xn=mo;x>;ChsP$m(:EjIbу:K<Fw_횴vgEkB)1$&YsvMsT12ʕ*j꽒`jSUo$ǣL`jS;ڇ3d<#[s<;3`B3B`V>,-+aw~y@=3jdd]n*1cY?n.. J PZ9h@r9$Ȱ(Ub7cv}L`jԶ>v0L`jޔ(spjTӳȇsS*\::)ZEn 6Z+/P`6 i)(1u?_pCTL!p]t%y嚹Z̴<| d3-۶Hybp<?,v ^W!s@;5Ugmw8R;FGD΄U lTqr}E{"#z͟\ҿw>s1{}2M#g4e}t1Ru5`cFVU1G4KeK^@WZ>;ܓ EHME? &Cb(R Cy˚V'sOdϮ;ywέN2Y}MvT5wCD2 V'эlmfY~{`= h֡'qa=a#V= 1?twNoKϺ8A'!&2]AĐM)((܄jCPpP5z=tHܻ{݉8EU{ x=K7:b:__W?yc29 ar*u/ob'yڏO#I雏g'rё|ى)mv~ ']4/eqƨF \|ݺ^囜W@c[ߞnUtM`2XGZWQbΖMLJ\U&U,tYrgdx8E_S4& "Bjx{"8WڳPOhd(}_$IRl#bVÈC"]UE5ʗB(i#faimڦ&Tl046 Mz8 tCwx2ß̊R(2DR5PS8-EoKA?AL*b vYwie)* [kw4<$o5#~P Vrh8>>^jw2?yȳ;[/?q8{5:ǚϟ?;$zm&lu{݇O+4Ţo~x_2vd&ޥ|~6jVb+wFcbMs2DBbwE.aKemjy%h𸜍 n8ax&Rm(a(I./QO .S m xVj}BS[ V+[+R14k5vm]rP$Z_Dެm;~2ʝ xTjHx0LP& ٪EaIHB,꼁C h[(#ϭ2KE{V\_NCwBKVvYT đFS D"zFq*:!0ƗreM`H1״1bOeBD+z8sp%g^ Ύm3ZN ; um\M6؋3 bӋV^Fb_88I~RԕDLB*!jJW⌷16-E2~38G:2;Lg͐ƻȩrFI4eӒ؀RU ʆ 9;  sT)b J_f%$| Xܝ/qSDOJ`tMv"at)1}8SI]*Ά8ݠHH԰)yB6+ɝVh2Y"+ 264y+nSCUOGfHe2<%CRѤ@&> (h C*FgI'Q+邮MUת1qXQ|YlfkP[2+KPN\Ja钭bRU '')z֜Hr"C 6*2HrW*avkW)~=ru|*xڻc{S]ίɿN^x$b?K^,9wj5i& >[|$OvxbW$@u=M;kv˷;ul|֛9c`7)Wsoh$&EA8}h$ѱHYlpw~?:[fͻ@T7nGkK'v5=/c; 䙬zR.>1RĄ-QR5L[Qk)"+MYJH\ {SR,Y}Z % Ei2ˢaQJ~{ d#QrYy3f%xs +a6T,J^ *JMFF($܍*VP39$1?d]wOaj RF0js%wJ,8iן(;cI mEnp7n2N+$7=`4Sҳk&sO. C/6/4K8i#wk\dsdR o҃u.#7ŤHD&SʙQrNE %sي,B(\$fCfI9i<+)q4^i!q15GJܳS|%GA\3<2SNy|Jk՟PDȉ?(M 9@Bc=gDwLc=OPK| 75,&]{Qpjl\-D>2Ehʸnc5V[7wvzt]]1 ~EL?vČ>%0kˎcy=kjP[nė92Ρ BcPj}vz8W6E-T ,U y,fU;VP3'Ezq--un7$˃ɨj0ꪋ+[ oJ s#d ,Hh_0^oM/6" 15#V)}7q=2dzFzv'7T@~4Bo2\SLꇈ.1DAe:] \ykON&|J&OM+NW̚I:=0嬞%,#؈2.i^F-k4G`˥\XGZb̔b/1\'rM*f-0s藋o; mӧVMU_) ؍iɠ2^Jtch3ˢbf'!iѲm8:|e'钧'?Ƹ_Ŧi*JP1c=~W30Gɺ6'=#CmpЗa7R֪iE&@sIIj%E x37ta0%eX z'sj*G5 iRKUk ^PV&G-SYZFߘѺ:0 C/ƈ$ Eћihju^OR7Zs XvCQP ,ʂ *PM3XAr}d YdAvpk9u(+ 0^0-,z|$4lZZ? - fDIhH@N QHbEۀT]@S* ȥEP}k*f< Z,w{v $Cjm-X$YxD[Pbb-g"@Zc{kx) yۻz F,1~DRU4LJ)KiHS7ID@Zݣ"BDb|$@z1`L1"x۷&55k6\ȍ%+9hqg[mǕ68)SF*'ʪύ$;'K i,и}[[-ɑܺ lRs+zR1Qg Iܚը2fmyo1QpSMwB],$Avp[|4r}۾5*_\ǃPw (Gl-!-IgגJb_+6HΨiz{UczuW ;-)rMMa<&.~Sr&OpCO;9 ml ̂)j,rlD؂g*RoFVkk{yW9`bwmrRTw[!R`о0`kRX桽%PӘKc5%k0t qgQ]'S1#{|A}9fq-vv< kH #mC?MԐeʰF[b 8 ۲rŗC6c1T.N{*np_yhλ&& 'JT3 0%-"+cJ6P[ej- rbNOؑSܽ<K"$GCZ HQn94U^N7N;d8Ə E: 4ُ2IM%hc79bFn3GNjҨLUl+N2A4{+_.)\҇meI5d֜[)Hzr8p#5:Ct`M&Qa|d[}1$WV;SEǙ4#f%XDQm\Di1X۷&ot)0je4CAX=:>ISbiKR"8:Ǥ:}D=är cpn#^}Ɏnݠʤ]nN[Q Nnh 08dZmG[ QqNRtsgMcѭnFLtJraoɹ>k6 hJLJ!e@+r&1 |m0\4N[4t)I[7݈ B!Kj7&)إ{D5~K@WZഽΦ蟿2o&tTŚmq"`$1H&NRT@ MKw>˪DkR;R(1df0G8_ `DIpGR:eݘ%[3R; v&Zm/a,mR8`_Of:>`gF XlggPKRrE$ ܋"졏`sXTe ;60~3YݍÇ+g[U _)hVyg>MҝLJi h_VaBg:\5Wg؈ٓ4ra?Spi7+COD\Ѻ_"(@ə(#V ڹCXZcҽ#7]4˽5>>5e0!殾} nh$,6!644J\H44bjwdw tƣ@Ϥh3c */.ɬ_InW. -3v]PzyϧZ M'Pn]'wS}6lPpĸRwIJ!UgT~U8h RsIͩ+%վ8{߆i) N׿f ũvT)Q5"֕2UNXֺd5>AmՎrSN;ܞ"1`v +,ܯEaˍ$)y-BN#''H_-{żqĉ;X'n~̷ⷳ5ݾ;syy!;ד={9r4g:{̿?&2';#o+RZn/hO[." UZBHP{j1J}>= ˪NSI_ߥ8)qL)cJqwS+ KHtգT׭4ԉAH Zk/F gQt;秖_xuhjNc{]jޯ~j IV)tUo=> q/3WןR_O?N䇏1vK;@1hKB=&-zLZqWvQx}>XXOC~dCyH&R8>0&lO}$O}h;<ӈ$Ӝ>3)bü-K1&#ߝuD=Yّ;rZ{~i3GA15b4>rgh( 6iOL8LO;#?߾^GEJh٤(ylY+xgmpƸbnO ADثMX#{L6赠LCL& [E@*0KvOgL9n=jo͇)$܂kkʧȵfoq#I+HC 0,n${! 
kD3^c$(ei Rˑvq2*S oX1VPs\t8FOnjNwԖ^>UhOK+QDa !DJ!2RY|7Un$yʂ߻Ip?l3lu gv r];.EeK ^;QĀ$YY]GS^Q.R O4"pn݁ҜV;rT QvPp(YiD*K픘B5eq B K;#iZ# ~!.f)BsR{XS^jMDђLxaOD.8HF"%< p = *%4"E軡(f{TT#۱|rR!@3ӎFH)Rlla(qV礝g$ʎ6T`wg;%{F=#@F浜 LHkq9c.(@%ҁ9nXZS֜?5ͥ ]SE}RHqn af GV9+ɡF-giِU\vqn*bty xC[]zn|V`Eip3 " ;x dȥ|̷"98=lX=JQ J ]#:QBX_OX3E#T묬x,'x 9=U a0i& o œ5-:E*3ʎ4 W̨2SS05DUNJ!VAQe>)y dJpAw*D&f^骫]uZ kghF\at#RRqE0N.}NuM:6El x{^$"%P`#%|/}$Yu*%=PXFa0SSͰQ'JZJ%(<ր[r>,.p%G.>ј*8 4%qBz.Cx"RT\B]qANRC.*X6 XXQc̓9W""Z"8x=ĨV/"e&\!(4ㆀ6n+CG,GUNo<e8Oܫ{v|V{gHkvAJPZa+4~0{zSÜ0̟BoDYnLIkv,lcAqs߂c݀eJY8 ˚&%,#&0B)& 8On7tQ/릙|FdK 4t'^7=i=mCBy2#.*%48P5hپ~aw{~0/ޛOxtd؆=PFӻ;¬5_Y65)No{qptkC[v_ vߵs,f>#d ^> y=>K`̫V7=B\YeѢ%F@?KE`?>DAԅR/M(M?NPÊ&5fG sP%zZR4|يcR洂cr n[%.FF>$N\K/6ӘX/Y)0Bt8H$C{}LJPbtKBG{; B<ج8oc?#l٢G݇ \g;5N}܎1I;JZ@{l0 L]B:ЮY %x.v#”"l`PT 3%vH[!h5v;IZ2(48ߊ-8w9o*XvFZ=b b$yӐ Ɠ@F];5j4ٓ۶&TkBE$<=u$P.LZ-T+::nB4R=MVli]:&UU1tMc{« \v!T,ӢCqjZvř|ES_s yҊgRUx6@\TV`#.%@f %{v B*+,EfWk&ygk%E <u_}{R~PQy?G#)4Bul=Ri%PdۡT+$c9NzkGj $F6=.4gђ:q\!JBȷyޢ?pIkjPF{8Yi3!t)&M+Bhrz; 6tZZl dWO<0rSm&#ufD2j.);8:ǨGqblcUN{ueun;tS||>W2ZS,Z(@_4͗hs CyJ%?ʝՉ`H9h'>E.hU#έzZJ!&tBtVY"޹Yq0q3u=/Nt :/x%44$ @ 5+[{c~xsa~q $L&R~!HHn{'t8wtm?ġ3_Rf^]߻A]/~W_!X}j|q3ģ:`~w=ӍOAwϯ{5ra<2]H/N:tkgD\MkxΞ"+ANz0Y\$?}<#sxK7\û)%&/ǻx$oa.?{'?a(8KF/0,> ?~'$FJ C˛{GhD n<4,Zx4ֲlY37^>毳!9o OC?rIx{w^xdl^|%SYg=2rwѻ*hكWS-fsfvp*@_a];(LbbhL01=I2|,X.a ?D Wmno}6L>^.'.6 " 4~b4ubs 0΃$Uy|aKy3n(u\Ebey(K^ao|NWS6M'kеmw^^ ~tӭ(ë@O$ObdBPYɗ}$|u ]vAeG32:,|8~R_,4ѫ'υE4Hf.f b7r%sZj }C 3^i|I <ӴW/M.ѰNDx$U]koF+pг _ CNAk#q[1ǶJri~!)ٔ))vWh\3y疷plToݻsrr?pxigJ6ɿ{<<{*__/xOedc~2m3wBڠzk&d-T4Q[ui7n46,7ccf]L{59Ft w18\:-¾==L{$ݟ,Y샺b}POi2TՓ|w9)6-?r//OF7_ NPbzoOG5]rZnɽr[ڍ?̮>L{s!re.˹9 iᾇ,:aDq).pWpxnGr~A ,%o?,gj1`4t9}vpT OkV_3V*cwpsyqAh| nAٓTzpnܓmNUy/i#"ggoBbWv<vľN=v,:uy-8\^0~tH_܎'70ig9H|pw)[.r@iڧA :0.75?'z?u rd0iFh?/2W_KLq$v2ȝZMz{<IWta!"z!IS!L$%RpÜ#%|qg|"is \Gf['ˊ)*Iڍ騠U'(< ˠjô3LgG`o?`-E*f{` [xupiDIQ7lV?$bӍ5^8x;ʒv%=( zrw!BXΣ õg~drH؜;AB9AU[@,2wp:9&bRJ7o |9wO3]䯇6 *MLrbQIOBT ɋ\PR叏N{**BȬe~IjR9* $\OBrG*B˓&9xxdJ= '{2`S$'ʝ˨-.;x X{DN|gfb(Wa\.Ώ/^w;%<)AbG* #_ =5Э74\0At|K-6>o{%(;G/QH,/#N߽~\ /O>e"їaAD ]t\3/flv#6^'B4("." bDOBD(By9= \ίlf]%(8sClHȴetL}Aݥ:9Ia͂鶁U25X| ? xҐK3ȋ6!oBF@$͂\A^ n;&>rQTxͽsyfo3a,~gv*kpn( '0,_&2K0mm^m?:r})w&.( 3dRO14O&Upe.M.M^I#TD[ʧM<Ϋr3;T6AwW s Z܌bJ$:/*Føm";=kʙiNolarx<$NV58[[|vk5?_4|C1bʺ7hdWYVϓ#XR 35o mrP־P Q6ZӴgMz+?пvSa:bQ/ӯb+MX19]Asw0mx4[ W}2cU nx1ȂaLS1G 1TF&,"rC k${鲇rK'^}5ZE‚C -4󾜇Ǝ{\3CŬ .\a*yُsD^]3I[T;bjt"q~ԝR:M5ww[|I ?^0x\{:;;YiDWfgzdDmMY]1Al YFg_kOI!h4US&HCs0(M R%B8SD Ɇɞ "V~+da(f3m3mxff|=sj5ENcQ7h(q40PTxJ"$ a&"5b gdoӺLn3 oRNJί}<s99=-6=) }نV;5sTdهw'Gk%y^Gҳt KPDz+!H^uUi9-g/zz00'؝(!z, vhEbNI) Z%yxvo}H*}Hq%brir<[riV.KSe9W(jVRiR?p^b7MMy%X[xqel JvZ'b 4jODM[ 0jl=9Qc 43 \ sX ֡B,5RiZ&3lYa [VB9B酛i4Lќm6=?s $J<ߕ1$5hp igFH/ 89596ȥ!ѳS q""F{kvZ#H蜰4Z Xp=Ƞ'E1KS"DBc1Ru.$o by_TWL‚d0J$TXCQ%ͦ=TX҄r, ê"e$ƋI Y ea(]fB"vaQ`$>4P%LGP`X2!XDFt3ppE\k`c. q FBmCE34lG0PI+(t!,amVu ?5KX7NրMqV;ug]Z[4vSb 0Ђ^7UNGu%@ ^7#L%Zu8+u$Mhfl@c^.KS`(Xu۔,InJe tSҤ-k&{-P(&̡f}AI < ^qͥbZ,)@Q{yDA+]Z!DF4bkڋoU{U)!!PtRqMI<99 N6:H`911CsL4.F+9SJ lcp0B2N [OY gL Z$AjiFє F?eb9>n (K.p`#2̥BXf[AF4B1$ᴎ3P~'GR'hsBQ4VCŷKBva i0P7GQǃtU4 ]K;Oq|QY}/K-zZdcdE %ZBLvqO "%N (Ӎ;: j:k\0ЅX'SdSX&mXhOJ kWQKcoU+`&ĸtŮ0xJW@23=e8ή2o s3\(QVkY@]*r֛5@)nfnVC/_B0cIAog[uU/=X`'yڃ`V"ZO jGP\HZ X35_Uץ^&W?ѩDjW+ZS+0)Ar[q`n(<@;#9YRSS (Ϯ]ꄿjT;y*.LXqg9sR\-6C5VRkZC9grVX8A 'kXNNA7ͷt? 
g?寷ΨN~:]8j`#nd@Gh翀IOm7[ f|݅F2j݌ݴOhw}7)Ncbn?u,WŠ_d[(^w۟dx& &$, ԔWe" -.*"q+v٢ftsN* W6(e)->=' iDU8&܅VRE{Q}HsdRO?)ӭO&[7v=YV}dEJ-XؑB;7>yXP*~Ώx'(jʵ3k̿c};vmc~BGjr֓P|y:r,N$-l۟HVX9Gڙ ڟgs;hShHϾf=qg2+(4*^nkUg Y3xl$ʁ>]f(j88 )0GPj0vbvמ]}ABF>N)9~#1aԈ@z *ƫB9eK'>o0#g3;?`5OeNlmlU̒7?qH!8\uY<߶dqh3NQAiolHGcɑ p/Gr6| 5Ql|>p%_@c-p}9KۙZ6"T>Rh-*:9$Tk+kXf#5OdG$*}?bSH`I7UΫdceU;`w '6Xop"j]@U (@4Pm) g8gF9yYO5SNrKsOJc7JLsecx0æWSu;|8G :GB|8|t$E1RcIn,Fp8k]'5^qbgO'4|BTz?frgBY̡N#.5j}LY|H3Xf"K/Ǎu.`7nٵڱcH>4!EMШ~dC"`yF 8} z/@(-(̓Z_7S*`XHtEeD6xηeY>BcGsQOa4PzgGʊr0c-w?A?e\?Yt֨fi2铿>xvWO]Wh8>[Hocϓsi܃0I];.]3*z"+*;w_tb<.!,5~KVT]Έ4~aAX(E*^G20ࠁ G(I@ *?Z`A_^iݠ#}‘3RK97'âs.e)LRD"*3 བྷ2c--PQEWYU#Kԋ)>9GlF9jDy6Vmh,TFz>Zv8g1H uYQse]||Q#ROtّSd&;*тF~}?H~K #H.t[ -'od Q#f58.dubN~t\{N䎣Mu,?^_ܾ[‹pXVuN-__X.^]_U֏~_ށ +ʯ_vÖE}5{ZK3? Wdʠ+DZC:5棋.c"Ψ |JYU"jmkv\cx> ]^"?.`(ai<;ڮU&|(R@>RiF3VRLzahr6 crjsЫ Hҡ*VJYFJ֩dxk5k/2+zVRáwo/ݐ@b83a"/!Rt5P-PR21Z-K]=J~`jky$&g5l8O2r̊# 0tePuѰb kmJ/Mk*Ice(\/{|FhcI(xuUZR( & ZE9J)F *+k^qAfDFh٠p JeX;=$bD2}˲ R +(,N{5lb%[W5n`#J]vc>ѓe^_m'͑‰dECb?mtAֶtArkXC ʀD-.Ҕ+ex&]  vj' z6iڦ{lNa)[ZCߒQ6: WJWʳR* #=$6$ħң2S3>_L4ɹŇېkn#ޅNҖFn²wiouv?&>Ӌ9)We[vpWf1tp4JPooUq%ۆ-X;6JX0.%Jb3NaBq#q[N {Huw~qu{N,^m`M?0Q "yInVy^Hܸ:rRN֓wNj[g8emt TJ[*K:ĔA7G8suyQT5>U|VzAS^zAU,-C/,YkC#R3i3!ljJ7\5.@fu*v ,2[ p-Q;grjl+=ۤrXgpE6JCvOo!Bh49e RJsӛFΔHZ@9>)5SJC&HGWli\-y7m.6Ù d m)۠DqHvfivli (Ye \b!e"o-!X~=@vORiIXEp1trAҢby|g,nr"i7nw8b'N]/r2aF$̄rNs\b@4 #ޱeV| Q99 ]D5)WKQՈr 놠'!jHGƔm嫒וJ5i_9c=` .ˎqhb<[RØ.nym-\QƢdm 6*-1⁩`aq䁙-Tm&xI:(d{kZ2UX]nG8#Z'Vv ˂\ȿ1{Ձ7t3U_\h ((M,׿manO&]ۛLöP`(^w|J5tpB畈ōޯMl9{漆QXW1"}O'$gh3T[SEŖ:}^o񷤧ÿsqD5G 6^^-von|a=&|MHݐ;y8g6l}qsM?S2NӧzxJYϦl7Veꋶ,W{6 _Fl7w8gs VI?[=[Q17w|Vɡ0hʰx~n:d۔fOheEBwweÛ-8YX(PҗY d 12:4nNIYŸ;%\) Z IC'59st ,X;o| ;/ig4+vwqiVlȺvq7ㆮ ۷vOˠ,ǑwwH@M!io؁JD"=9p"Hh=UeUXKui EK(-䘦]|ojɌG>%[>:8Z<[eQZJ:4lWcm& b_̬S;v >G\B!MN94P^xˆNջ=\NҌ~Dy;=vFcRꎱR)2G[)E . TT.":a `Ѯ[kS#ZiEΌqXwW}#gqg3NEiojYe~c{<3]둜Mb QcDc# i$NoV ^s܌B-T کgښ۸_alH@*ڜ+NN^R*q-QZƐ)93iQ)84n 4nO~[G0ɌQ]MU=@np &ld@ ++8ژBRD*=IŊ>ubtƚwgc!,S_avMiZ?}uf7a0ՅOh6:dP.meC֤XNQ/gJtkYߔO( \RQZp 0"`Ph(V1G5A(:.UF[Nا쯴qP~f\[miF {LJD V7'<% )ʙX cO`50b tܬZoz_j` ٟfG}ɸ:stZU]ff '4;_خ5+cf{Rd 7+RǂqS3@0ڹ?Ȇ<3Gw~5]IU(ڈ؞jN)3j.zqTJ.T Vl)߿4w$ބLIoC L&~ynޗ r[/)5,_r=))By d)s#y0_PoN*M/C]ܞimqJ2^sL4FɃH~FER 5.!T1u2ۇrho(i.!BݣkO)AtX%QvIOD7u,ۓLh]!9\٤' QܱB9 N/0.Y4$Yhq Y B@E֒hS H,h <7ϔL+%8i\8.߳a Wvu;[(Y uE8Ca*/І֢Ɩ B8muNzjANzbzKؒ_[wA0Ԣu2rdL,8WT7,b=9S괽4"+$ūFq9k%3~y2r yb$y{RհVq،1c*RF2.Y3:WRqJ˂(|x ֟^C*x8pi1ܟ/噣v ~?E?a6eF[&,w IXBie0x 'ٛ4C?^㓍X@M(0D5tVnCA^̀c|5al<Z8DŽg.r\."KK"$*m^A1]4ה&wN3Q[1+ A~Xru{7ޖ>'q{=rŲ7o}Wqt?4|{)-`8O@"&ʣ"Lii"e,*ʃR*Xp) 0䦺%c_VY7>ȥLQ@r0d=6@ 2r.pJ<(<Ť #~2DOs|ӧS-)!=] 0])1lAD)Tu7Cb8xKJRJ҈ &0EeřY/\NcrXJ!Y{h=J:`|#$D4Zrn;Q 79!ѤH PA >Y)`<YP>Nx{A~oG5z?O o0%Q8Tsw,!0uI>~ur@ӧ{~o+Y;oT\&0w>?VrT6#ܿAܽ9 x3㽿' Gp_Y'& 2͛O GգIڎ'x9}Q SRlSFeNq~0򨖵VJ"W =F. 
+ۜAMPh4ۋͩvf@%CB~$g:׭d4: :̈́0F6ZHM7/yvp`XgzHJwQ y%:c=/۔P} 'z\O5=JzLM,.g.:zᗏhԿzO]6ոXV02g/aE%~)V7いAcxHkd/GJKTC5 p)/Tp#2sNE= er5lΑ?@UP)p .?j*} el2O{!j"cdJsuRNAzR*B^@ٚ^ARbŽn*Px[8kq]mrQa!< , <XtU]Uk+DIYcrUc`5Øs֝xQ*:+.v `/!$=Bzϓ-ޫB< 9d`|V)N_3BZ.]MK~1ЍZy:q<&I{LzO3(;,+>=p7w6ɆI~rCr% HM>Q 2%7B$ߓێӖ(Zy:|^*ʭa?g?K{O rJ@8}JdlEw ܌;0tO(;0΅ߒ;;Ҹ8Qg봆m/u:JcZשn)Ԇ ڋL,O1;'v1(־JOϙtqq: W ffF)Gfi %9=XU 7!ɜ7*{_7NK4)\8=DyF=*0z^zEj6J"'*3^D%J ʑ]Z*T !]UiG 4|k G`]W'^0Ca xvc2.WGehtVO=9;mxҗEgv}zػݏߜF뷓쌂:%=賫hWB^7D9~v(W\m*mzn=hfC9ܛKi.9<Nx x=7Eƨ:&ŒGȉ)gNvywDn<R]HQ+nT*e OTQ>) 1EEJ&@dLfS@GZ`t1rsDh8p4Rh6P+ITr4{ۑ\JQ6s^NЌ?y1Y/ TfuyqvxwUh`/7.C99lg}w6,/* |6{q)ec*qT"*1fA*q39TfêlzmDVtއ1I梫ȖJJRhʭC}_?<ĚY4Хb*։Ym^D!NHSPjU.^ IXXoQސ&hFcbrSj6՝&rSka^n(L$"Ӊ)ŭAfhuH-$Ih1HUbBbyKrM%xh 5dՊ(͋>$;sBESDUf 1^PYy}bOY.%C _ Rc<ӎ JH Rt&D/2ԵIF%)ᴤZ+TNUg 6 =4pV9Drj ʧ7ʹ#9ι.o;y y].w0Rp* #\yta+r b Fb Ԡ%* O-E[(TSAE(S r[evƁxhhashnzsoBOMܝ<]nKk!XХ2@Z  ,hM![H 2D9`yK4JbAu6Ex1õ6P̋@ReJʠ::$}xpѩ(8ZZ]s7WXM퍌CuڪĹںs|-Ȓ;䌈 ɘJʶ$hhBɦD^z|]rmss~RCTU?ɂLX47xRkS/E]b5֌V8z)7T3;Dq]j7jqu!SK盧]@" iIaXoΥ)n?A|58$\Wio~^ƍ$S;?ͦ~CNn7Ú3.{z;u-QAh׌qԐk6\mV"[[d~7NoԸoBO .A-[j2^ @K$*p6fB"!98`ESNwNw4իl95#Yqk4-Wt' +~Ƌk"H;;ݵR@*m}hÝFN>o?DPqɑm^l;S&tbȅO\j.DvvN.p 2*yf3[z`$][L/Q2QJt,j#t,4]qiI>}+=cلD](v(Qv4\"Gh4$]֘$>jWZ XRr͈DW<柿tE*Q!%P!yW4=f]9ņƖ 7\CZμkp/Γ&!AR3N=E{XFk)Л"zo#D`m?f>vHJ5#|r xAC>^:y,D &tOv 9AǨ4Il!j8݋p9NjXBG 摐SJPЮZl鬁JȭRR3 l5wDv/Gt U@M2ylŽ3X.$+KD[8A|fkIM Z^'6{Dܺl3j]N,ۏAb[$#F1u /zjt8l1bC˛ՌqLxm%O)t.ixRPҒz@o\J`j*&./ʜf^M!ӭ ѳ!4}¥F]=h΋6Q^;zv1e{JFOvc.{HQW"\(N!<2),YBb) rLbph!yE]F(*E,m7 ɠȤ}$%2ˌ(ӊdPR4Êxr2mLjǰy9r1$(Ր]>K?HBk7y?qVcܪ{{7/ǥdn{o=,TvI%0^ -"'%5\}?m ;7WF?[_BbW~_N֭MR_n4ٟ&Xoz7֛ƺNƏ 5s+#RʉR"ဈ3UD'V~y_ ~ZwE]Եjt5z_#o K6rh ^͊1G;Wn4΢o-4ZFBofZQ, }{qtW+EQ6.pJ0+Mb@'L,%ZOf~^ ^/ƾƵG. .+sN+6;;=Ď_"JHM4XMz1h&?Ya5VoI2:8{(/5wt08q@l(ׯN*(Z~QxAKӺ.ť,L9,LD+VDR\w;&_|*z޹S<Ky.f d[lbR~ OV;ܺh}jF> 90gz5ZEn.yԋ"t}A2 {2~Ws^6]Y+xiڃwzʀe¡zط_:Q!ȯ.cEh:h ;ͼ|<,0k"/<#?jJ/ Q|`e\ι~59OIn2_*]& h)r*PKeS8dG.@JJe*RS ?ʝ'Fd v?ѭ~GagFc_B I_:0]TNDHi#8f[i$qx#\*&ଘќ+2{Z)3Rl)F'xuF#-bjFZA]yɼ;}c$B2J KoC!vĺ{,鋹x{|ШqGQЦ*6ui9Kxbi&(ʙ#b͒2\ߪas|*MZs-@EԘ;ɾs^1-y" :S%^@^K ;rEGfχѥ}XxNm=d}\W%jQ\wtz1I)1yXs =Oeh=Z(5is& _/DARӢo ot}jU$S[9X]Lg 3>!hE% Iݛ;}a N^bo4utUݝDDv'kdo\`[٧dc J_+ʆ@;x1ebp0yuAzG>H<$|x8ABRni,չĤ L/$ l+rI#,gBj('S!9(j.G&r浢"%M7TSV l-Nդx p\8 R-SbdIB tg37!swq@t}NG4WwsCػ2?c~;ݴ麟9xjWgy>ulsuyP-:?FҳwYP4HBr)}vS4nN!8>vhv@Brm-S 37oNicq )xuަ7{xoӛi΀p62PfVlj8\Sg`)J _ HRt}AFARkHjXƯX Z g;~q9`5o >FO/I61j Ԁ)n1sJBˆ6n}YqhWJnI) lDi!'4lf0)v+mv٤X,޼{(m,.^+^}~x HS{ FQ -|',wKT/1@3D˾p:@] ?-;"My؀A'톋ېET@Pm :hB캴yiuHR]S[Ъ$uP3*19Xh*bAύM{#;u7H'~PN9ꀮ=jPBЩu4h?jb.=iv~KBC~餁K,nʐ :d)s(F"gM9qDR-XXdRce~,Fv)5Q%955K830L-ǚf*Θd0i:yfIJҦ~F3F$+彈JGe ה$,YbBI*;,RNC(7q Cm 1'84 `+MCp.˝WW~G!b^ )(@Rx %>ao(Ii0X( h?ذ8Be$ RF(,|ݗC-(N8iԖܦpGyMxcHkR _T2>^}EaNQ^}"=e*.a?ݒ"ЙKW9t3nFjbqa !kxٝSJV)NLĹ?E‰sHC2>JD$2Nt @QBnhduc*}[#ʑ>b8o@rI¼@l CA0T>ɥ E EHn[S›j#ATH21a˕#J`L$>#D2ƾ p-T]$TSB]Ro0Kj-PDtqYjA?8*SO`]y)aP#9Ӻ``ÙOufB08Mޜ4F.yW v3%d^9%]mÊiB(X d5)TX1p uC )~xܪ"uK?C4:A}Rƴ(SKm?SCrk$:%LJmʂ5W=p\. 
/M$ :ӋRleuBi% '<iō׊ubfsd}ljÒ )&D,8Cp"DGqTGH")% {`VHl\:\jT'weaEWyJ *MDQ5sdH82!Lq\\ l2劉%o@c+3t6i,͡F[?ޓU42a6Vk}޷:>jtK{3d,Y8fVM1)O=q(12VO],dwy {Joaٗ q_p_dRuxޏ}?*1 ]/RXvnwlO[xvY=+GiǾ}uFcwli?ĩB`ޔSpŀi"cYK|n 2vi>⊅K= ǹ=͖@YYKbpbe/y;d1`8Ҡ NQ-DĴe2|4.BI闻,'x4ʽPMTk<չvQsqܝ'q*ͭYF̌`ԚPmxQVΘb."w5mʲZHS.r۪r*\DM~Ctd;Б@Rۀ:\uw&KKwj4E0玨(ٚbR"}(T*!C"0i:q>JBi&)A)y,RCKw4Hveщ}ƲF1S EJ,Dag`6[CF(!PHe,NM& Ͱ\e{N1ج%^k$XC%?!fDO"V"#ǑQzcqYǭcCʱ֪KaO65f 45e+B(Ei` (FaJ`\F!&8W9to"C#qXYpHx>oHUz,9.='5Z3٣4,}Lh=GgN?&l=4H)=7e|yRÞK.}ǚ+f`M<|q8L4l7|kߙYBR n9&Gra\MnI=GVVxc3h'eݨ<кՕ}Gv80Um[ unMh7t*wX=- Qnue:uQƺp>m[ unMh7u"%d YJbOtThYT^>r-!9=ueWُ+6MSREMKm zXҢ/7*^ڍvݨxߨ=S8YSAchb+a RG4!Sc`~y0@ 2gk:},sCb],\/ͦF_^]r2 4h>߶vz6M6f<ђhv ~'k]DjFtoGQ#ˆ"" "1;["ӭebL"t$šiⳌn푄GS!L$+I^Iz ɘ ܲPNa[O=xZ(lH ( aRcNрdw<HkTjgڤu̻tC՘M]NSTrɽE6YEY*YC)5C /EK!z&QI7H;j4[N|#DS @[L7q$ujeg} OT(eCTJ<G<n$ܪ+ɍ#7afS5B& Ǣ teL%>hhSЅ hj0S>+Jx4[8 )C܀*TNT(dfCㅋ6tI`t7u1G(&=5KߔRrNY "*.W<0eݎ?Z\ hHgZh(ګXrxp9Ww7+zO7*U6^Q a# fʽM ՅE[^ޘ5kwh=y+8&w&DC,ɮ:htP4buhX#k!Ai X>Q@e J1]K'`n+'|F}Hwȫ^y f (Y9jFxAG s\vy&ME*e쥇lc,]ϒ9U.!ewN _aTW˔F鮊ͩb y[F x(,Ta*{"<8O?yiYSGLhQD:Iz6h9jkQz9̭hwX 38fIŢgn5.Kû~@=T&Qjn%JŎX0bm˹K/4D]1BoW P D2R 3? 1Cްi=BτhT$Xcm Z#w3{FN0k"ra?'vLdPPL݀[D >DPB1'lآߪchh*:ZK9y! ' (c߬eJHrdJ ˆm&Zx :4:.pM6343ֿS1o`X>]9RgcЧ6%{%m{4>2(m@nglr.&(U?n5?:[ӳ%ity: ΏlIBbk[1_$[$£vN=:8,L@H22n橐E^JVpx瓳ʾ4"idλ s$ع@$p)1JkrINP@) PH8Yr",( B,Bq̑>|U=f͚PUu?ZOvqht]}Fւ[k>Pm/ؽTW9-$ ܱxz &d'ՙ=cM_,0Hqj⏚~@9ia݂yG7k( >/Wv29*=^ woyy HqlA!_c4%KıV.&G7K{HwP/j3 _x<³|MB~2Ofŋlw]U^\&ٳǗ~/HH٭I?3G36n{Od7?y~ǫgs#n6,?4g= ߞ OFƶwy=pgߓ Ʉ-z&t>ҏGSY$-֠oa?1goEnDVx6W,'i{OS`WoIųU3|xUk5YX^`jٗ޻d}5Z,#C nmGX>~v&Ss_ʪ ۿ4؂'O]n™YG;P.n~2H\nY_z6M9^p)sjGCfN-^LF T̍mp,Z6.=~Ų=оZ=?КP+2xl8|j3O[w;~< $9yzd|I hqsk'i6." ty:bP),@]2~0SgvI7P>Nn&W_-A&]?O00$&>`짔: S \2uO 6:~g_Ϋ| 1.=,1+4$Tdp4^}Xaȋb)njyтHx%p+-EYޕ7;I./Xž2YFrRYAdt 7Lߜ'Ь<9/Q%w.03,OⶓAUJ]\J:i4#עxI݃.)^mnŷ-wth- zuQ{z ..> [ln^=Ūib>#,^33}RN@n6xӏK[t4ms�tl)M^53Po{ύ1KT(-[u:28Id>2+y!0RP [r#r*\ %,-Uۛ/mGێ:<JM)Hnz!BKv p=NĦ]&Oë}>ktI[ck>Pb\.{ϮP;3ZD>cw;u;YUSv_@d*ڍmVO>j񶸀a)z;$G67s:ts$I# 'epҲAI˗ SvNwe,'Tm!hU][,/*@Km BpT[*k HFG'ӆ=p%ǧ0K tj<BTaգ8Έ;!SzǍbK[,l ̦eZnv\oV(ڹ"C Kë@T)= ô*=PWwݟ-Uwr!Er_.fV|;f^J -k q"gU)]HubxobQClo/8A#]n]oUXu+U8VVCutNɱJW:oThSvח7 ';<8.<_2h)uwQns6-9}Sɨ:6uf1yk8'm :'Jd ɕT|̮Nڍ}L&sm88@6ʏiaPNػn\WT~w$n\4SJ&3/IuqH%=5f7iv,C d/Ћh8&rENI0PN9=kcCJ:Xxj}9Ⱥ-ˇs5g29k,|UK?ZB'uvVe/c@Aa,h*3ZӴȓ E:Ҍ*Z'iYĿI5.jQ,nKߋHr{"9Qsᑙ23 `|لF$jYfhאw܆u§\{U︍X !ib3rUɵz~ Gх`($2ie16',,)[2|,'6ayZi?Jɹxݧίoܔ*]_9 &~|GŧM:ҹ/?421+]<ח'Ǵ ?I+(l" nR"xsdan.?uf @ۊcAi^e675blcց:i*rDYWgibj:4 86#kƉ@瞣źDV ǝeH:+ѹS'-m]dlYכH%EcaOo .?r{S7v-or64d_ %yGvt5Z`TGʎ3y T+HoGD)yv#9!IE)_\ђsbCyHBhZ+R noVAtvU4xo=9ȄN*AʌEu$3 ՜'q! 
ޕpaNB}8Ad2<"[**i~ a/_B(~M}@5~++~yRM2c>ugs nyZXz7QJO49M}%.XB{ɦ8rN O6EXtݙѲ&dFdbKA1$8u&uH A϶dw/B'W^4JmhwHzRz?Kj>n2.>QKPye:F <}Y1k&hF}[/~]>I6x]E'.ݝK{՛}Ay{Q*jJh.q^/W7َv#wtGF@"$mV[FeaK# =b@RpU=֑%ee;Tm 4*Vr #vm\ʭzGZjH,{x@ _(frKyu4YޯnKp_r1.rS+Jk=ҩAr6B mوqh5c;:XCcQO PJsՆ֋jCD(6Ϊ8XPkx]Ccp^+evq']^L]~ȹKIII?)#GKTY'5 kQJ3f32i9}'; STVY9CLޚlld7rZj:k2C[l]g«9ɟOKw2;o.?dra:㕂,A ;TSDipZ1IrpK HEPV^c[Żuנ&jK.\|nOF!,+zy6*81Mt0 ›(,s$F@r`H2鹱fmB,hVǬ2if@/ozɍArS@N;IXeG.?N_J &orޜ7CU܎aZ 9z|'f\9>D6X ȉ DU~9˗_;+p1򎲂퉶 3\U$^3Crj.%xvAMz)N::gt,Qd[1}w<dU'=w ΝUE\pTځtwUpˇQCZ8Zr%ۓm@IھCH 8*kے&e}eǦΓ%൏@J@a)(cbl>)w㯿*lVY 0yA_; ޵J)Jij۫ph# A9W|}C@"JFEŚüTc 1|W(R;dc :j`=e(ډ4CZՕ6f\]桯noObdvW֠ +#F\R2>DB$/I;<m2`\̞l l8.v7[ފ1^*Jnճ8O݀hYgNJ32yPlLSiJ)ټ>vE{wnЀ+OYcpZM9l\xqZȵ Sq4e;`0m X@D<_pDٰ`/\xƕ቗l:AuLm\޹ͪf#.e >OϡڨBnV‡fSX5b}v 7AH`L>K״c';`l@6W=O\H~N/mgC/ӬToO4`=\bT:W43؀Fv\[h.{Ot~?B4ZΈ:-[1`W,.Γ-d+%uLJ+Q4ܖbR -FfML- s(Kf"9ͬV +w ؽJQRޟ)¶ZT$R7[YEWSNJё,f,l6-7Ihv11(S bAm7C" Spi΁JkZrd*FpIeGMTT-,$PVݕن+W,jUeJlΦL ZI:iL|R~Յ+WXvYOux}=o#)O7wySw}rw(zǧ>=J0hf̏|(_\^ݢ듓o_ѻ=bwyvt˰oeY֣ퟭRI)=2=왅+B,U)?R^jUzhC`Y!p#%N϶\yhdz[̈́}EFUe)K {Vw&פ[^-~q* SqK"hEō A ._8r}uցZ|Xfl^>MJI)r6)E&"g}RDBTy$1 ^&%(2lv̐:P|>'Ri+cAR=*H+ri-UEy5ww9)盓r9)盓fe;,)yv.Hb&0&b.sG&Vq)ChAYz:PӚk|kV `II]rҤ$'MJrҤ$'MI} N$Yd? HBA`B`dbI x e :P ƶVY&K*ո| F>RD)PVT,jR$k׺n$oH5obKA3Y4( fYZ9fmO1pNq4|x[k)iCN21gy.(0JȵК{b2rɰH 71I^bUlp9Yjupq3> 0z ITp,kʶ׬@ +9#)~t3/u짳p7i 1r١ y-]*PEU9y%CBH^&+x 1 YaN AXÒj6\ӓ@V`#qKXUaڙD&\Ъ cvLF;yXJY~FI BUg% -;v~+,XyU N0dO;xvkGew$:l>CRIb^gwMM5ldb RЎ^\އ^w=G~/z3r+ub+02aqV.&01vxFVlIW),I++H fzWWغه`r?Fv $Cro'cF']a=ӌL u4[i~AhvL}r窗(ϧQܝi锪HpR2C;"h5-"FZGk)},ZEqsN#FMsFZK5 m\C rw_hԫ5% m0+Hahg!~O_bתI#V(1~Y2拣6"ReO/p5,n+|a=ݼ7G)ۯrY={D~(&B:2UIUb%@b"'ROnh-`vg= [icRecH2pŤt0H Uf#=+wL1l$TuqU%t@K~)RkZ(0Re:!0RQ}OĎ{8qmP' Tq:8,>e [ᨙrmԪmf/̇햝`V޸/oKJ4V eV6 .>eLqc7rJJhDXmJFs94%4Jc9  λzUMH]#Ɗ9tT \=hYJ TUi{7 7;vO9vm`7U kwЅ^8L􉅃=ǂh* IJ V ^%wq=MN{= .^< C(a<޻!̶?L3ۚ7mo$ڶZX>Zكzc\1l/ݙU1pPT\%U ޕPZeÖG'eւFҏ{.Ш]C;p%/pX[[nAvXz1o`{/Jae}vX;v4]k/9{]fU7E ΏЎB{q@ǫy=o׼x=5s4 rivڻyV:lhmս[U`+$Tz<uZ-Ux6wFͽ=:1/c|*`fDVSt9zt }٨ً6*7L*htZ[zd359VMMȎQp~Fz]xt=oW yJ#"P՝o5($F/eogҐp%{%eZPA{]IXB"a&A6 i;so;V;BGg҂PDxbreOQ.ej"wE_ |(?eGQ\D?އ7ƹ("RU1=R’?7O!7uwobnH]~}s;LoW/”pE蟕Guv-0q[<: 8Bk3vv.6mO ֪T]!||ĪxqI=U#>5(=YC{0@Kz7̨AчsHH fr@p%fRʁskbp#b2we<$:Dq,e0,J6`.*/1d@'.uap<ͨ>Bi1ztx2`U8=D2Ct朥Q !͆ܘtJQ̤1(뻜 ՠ;FJH-`d@2#Xi"}JNG @Ȗ` wt3̆A&Ԛ`λ9h|V-.?NEdA L48b5|3=Y9*\~Yh~.3yǐb?iT`@ sAdp2\@NfckͽR:0+2Ւaі1r FVl]O;>]h~%%{R CDhM"@lv8# JC'0(w]iqeZTXUƂV#:O:"7Iv1H݉}LI$tFzo뺻NCsDu8r0%Kתr'4$Ev(pȳ*A*' e둮ɑ\\owЕi=YrK&>bLT өgKG>8PETe LiQ;{DU*oN_?׍>L8O$dpb nI(x1RJORL:/rBi)E-G&uvp ܠ9MؖFfB DKIИV6Jx*2 0Sj3d]ʰJcV*eʂֽ/nH^hRPWW6Sx)ջYHǝV*År 9%֧by*msԤ>者?b c{*YWPף630,@,ltʤgR1(|_.H Z&RQf h5Қ8nq1HPo?JJ kdՍRsӕN@73$ 3[֚T@klLICOgP^vJx.Ya)Iqޕ,:>+Eй:·6J hږ$]TP\ c0d@tLMXn}F+D:8ySz^DP6Y7EYCbXD"NN[ 0[N 0GZV,lvD͙{Qf2m2FfAq) 2^%e2sN}hd"a⚴ZLһ|UAC |q2UƁ(%Pd(9Q IċRz [)8{x4`ԩErd,#C9yAsim,J`o,\R7{& âslJ2ĥIe3%|t2*0dHuu 9ʃn\%NUuXJ]a6* f2 YŐϷ(X'yfN 06.*(HIQlGlI*{sKc)ûw/WK7DŽ eqQO.a=sk|Fdk>r鮈gO6-ty%]?nӷg7B9UmqUt73"HyZ}y}qt_.s]!>c=sej:ML$j龶FF݌u m!W#rl=8w$A7\ _sjvrpr$W)\䪫;1} z?Bh+[H$z=Q$!wLl"#xR&^,3+QEbG-hT(-{1xV!l6bh77$Am#?Z6{; ~뼑#l#حގX)v,6-g ݧE]vu\PRɖ>tuCHa+컒4|+>eB~{,\_]3y\u+594l?}xV@)ӂʇ [K"nfJv\_B}MdO,dxʹ9y(3h5Lԅ[؈ĸ2KLO4aH344 FaO!NHxDڨ\iQکL;q2\GOz0R%fy'^S""mhi_jeL}AB* =U^%ҳ[5oQrey@zcr{8[og_esxFlZA 6^J/AᐺΫ5E,o9v Jī7b_jhc>~rpoiB(`e{.p}źJbeR!S9 %l>*i8R5%ڇUI1CpXĄ

f gF43\)}t~~~w[EE嬧UXF+DuJmdQ2b KQq-Db`%p䠴Y0]d rc!&d7zZnezXXWxC3ia7oPFM$ud$3X*DV Dq!+rixJAHzb|+P`ʁfWRp2rd̚{mٻqcW GE.0O;{ $7l$JmOw=gvKݶ!HYj $Y*CŪ%kkȼ$X3"Qy2XMB~ 0?>ꄤ(8(m<.Lb.x`aJTb M8$rsW84&@&zH-glaw< P\oqSsJh 5 ^TS)n\ߏ*D=)YJ5Mu#,hk$`+ByBdΘׁP& ?/! C7V\J(.Y@DEH4Z!7şo<j#- L0{%ig\#hSJS3\J9KR:O삄B&33)TqBn KoבӮԚr?ߺ\?Y9oKgU2euR6Y)-2kuw?tˮj&j6W7+cv7pRzBwlPŋч8yN_޽KcdwqVZ|W{J*h副euS;,K:zkn/KnSaXf4;2?H $OܳxG!G^* :b~Is]|Is5s%TjYpFT[[' X\!BF$[W#mi͖u 5#`"XiGW>q+vHp8 < PZuYfF֥HM>MRVQ\H@3rebb$My\dbJ4kEBFcks$}k&>%T ބnƓGoBZhk4"JN}hkt1_Gbu/;|3Q^T.MW ʑ HRj{~r8i)dߊH^#JJVB (Qݡdsr)Z\?c㯶uQ\Z3B$4SRŬnuOy#xH&ǯj""Sdk"f*(5-m] sꯔRCD k)VA?(ZCJ(V0JGZ:YgYf 51lIY 5a3vWE*'2-תyTfuyz`H]H;[-f{uM}Éw M)~:;&lqyq!|~dhٛՎS:i_P0OeUhsȣ{Mc{/k)a-Q`HBN9C]KuQ:"1R 9&Ǜ.?ٲĕ8q2/jqi@'@fqC~B2G`T zfy;ANKE;zXpq!ckؚ$oE6Ram=τcm{5$ nw(RރpvZv|5S"܌iA,I)CnAy9`28uxP'07[owj'yر<-ha{`0Eg$ؚ<fh^ǙLðb_zw޺ӣl )@j>?IZ0m*UE I#p}ʆ֜^P O2X%Zʤ{` "|NV/r>w@`q_g 4d {Y&rIݸBPl>"}!Ke`))BٗI4Z,)s!{|ZLv D"Ned/ȉHbf&*=D(OܒI&E`Tuد{9]EZ /j sn;A_5?eYĥhQdzhl=I>QѹF@,P ;ut$` hk% \}BSD!@;lUs.=~|?Ȣ/YWoeSޢy1KPݽTدxGfuк:{?y( SxpԎͥC=EGtiD;<>}5aȵ i|5r 8]J΋?.[lx>zitFU'+fƋ&@@$ V}{]A2Eed4G 8⹒2AO֘?\# D)(#rV C%m{ɂTǍc\jh!HۯEH)z*ijdlf=>?= HD@VH}ДrZ#U%j)9 *uY٧C*q‚^HLӼ_y&5ZrH<Fa&~K-U=mFQboh0"U:Wt%-XE'PbVQE koIDhn9pXhqyAZ6J \٬u&1]ի9R\,׉4w=xG/WF`/FĉwAj:'6&{67@юϭ7y5D Yy67ĖO8JS _dPz$sE%@Ҷc\wuXJUoo0sOp;miչU"EIJL^SNT{qkޕjd+Z]4{2AӾ$M7 $ͨ\_dWn"- #<<2Ȭ%]QWeYy&q\HB&*,9AErq~`Hd2ѐ!sI|in8!ύS&(HOF( 'wE9 =r1w Kt!}1CBeP%n%*=,cqȳf^./ fG>ǝxxGAh zbߐJuf(2FLPkڄ6~茕)g tfI#\2ZI&M# ONZeY̖``8 8aRTB6I)TW6hHiZ!V cXJp*J;vi~Uדi@ڐ;1PYƮ7$6U+{\%-AZ ;DC,d?m‹ 2x$K L9u_kYjE`i#`LEhQ&`/I /XpyA„-..?>֨DM;OTCLљAh˪8_Ds.{Y3E6^M^vRI9E3%=5\yHjoK-X%>_ /T'+Zѕng_-ҖL= Q mDF .e>}.%ުeO8LRq[Wٰ`9,弎?nlxMt=ΡTH :]nxí63"҂[@%U$ ݅8Ό(-IdL0 "!#B[o5̀1yljIHj2NAWnF6!7b|3?f|8 k]gN:|O6@O7<6;wčQAYmfVy<$1.xRR~."/dJuEkV֏8~#{_ 9y40^pR`?;>w)o#Fi/:Y/m$Jg3l6aK0JCR% Qd2}x(x.[i&dtl1-+-!U7TvQhgf*ఝU5rJ{AzS"~",܏:.̨s1Njq6 p݅V#sb7wرn/vUv,{d#> Ci;Wwa1+(L\g|?lޢέ*^~'&exT>0FQی,4,9IL'N L2Ɛ,azR Pr{6VW:"6LI@FpMZ ŵ)-GZ)h^zf_j?+Dx~4<߇ϋtL˼mV3(~_}fQB@>ỗ]}NNfOz?_RAst{E۝w9 h_A4Ly4L ~ٟ?dE۩<.> ̒`*۷2u)BP,u>/(mz&0C8bfQڷӻ+!TCqa,ZȀ%^(y csQ@Jk(ZS(=ro=F#%BXA/]!P%Z ~ WV?@'>%<7#`e "8bQpQ;o5,3W ŵ2@ϴD\J;+od}+ [Ɣ.> |VT垞K1(~1 _f|@´y{5`aߞ7ǥjIwxKw`~?L&wg6GhztR3}R#;TCXW )5mk@%y7WCM=#.~Xz:(12tߠ {.5RH ΩA0Y:|ǿ5C9۠7̆ݣ%:k昳]: ,6nsVY'l]ҥv俳ZX,]S+{ʳƏ#_2FнAl )eEHngo]U`o΃w6g pwJXZ;__RɖwNAHtW:X보dNg)$l4˜-˲q;"!kIykutwɖ,JPD5yq1K4%وF{zq),/;|i?\`zr ˑùLfGp"A.y=? OfOX|pڊTksm,{#}yӰ*Gբl)^.< nGK "G<]>XjYD1P!!$'w}V@ uJUeA@5" ӼZCbV?uTbjb<`-伳XlNk1?Jk}A'jTԘ6X:mcηJqW5Sqۘ{ 2qwǵrD{Gd^r'=\"ȵ$=6^-uݠ,t;JA̦Tz6*䢉mևs)b4EQ 4_S;ĩXAyJ{ʔQQEMy 1w6ëaDems#)F:8wp^&8_鶠0ĉ1 Ye3A i"x 2v4f7 *G=0m)=F*0J5Niv"8cǘ"dءA zi/_w{W%Ι+n8K`Ыku$|19HvrR!u-w,9gZ0#KZ*h0UlR" F9D $C'',A u5;G0F3Lo2k`~{U ˃efcj6)73 ܖ>`<:5,30TˍY ƵgR~k#` IDp!G /uT&/@ЂHSjoD /{w!@Q%Q[0:I] Y׵$5g<Gם>UɁjoao]VFeBICVŋl5)P[M58eiC&6U^{ *yV2{s,&N_aM2d +vZy!Wu;p)[t+v?G-v!7} O)ߟ ivtǏĦMҫ+ yՑQ#x)NTLm w{!ն_u]}؊9~3kzXyFИq 8+B!aR-m΃w߲nN0OW[2z^0ὅ|(dDNHIJ!̂ǁht Fޕ`y+%{LK|˩PfΕkNd@tޭ&YVYSe;Nv.g~֌@ Աw"" m>/Ӌ6(eJs;bRsn~Tq!ZK¿z-h+O QZono킞jKQt)T#zҝ$_lfiC Sg DRuXh%Jb?} &M.9μyIYKfql\k]c0GNY$. `"9ZǞңW]#9`-FDhkHƸȄJJ{Τ1c|l?̆v5kJP~Wjۭ6 {UJnXKݲ ߭.@3iR w> R+O >KZrQ*Fy!S%{Q fŬdz\ D;&sUҗ4ysޱ !  
"1b_'񼏯VŚQ%,,א "]Y`_#\w '84MTngOS^OSևimbi}Mywԇ;lz;Wf1Q(Or}A'%s/wGuGtL" k7D귪(HQrVQiYTp>ߩ Ą\ @ 5/oJŽiH~+e*#Q5Pwg~ TrKm#q8()MڌG %vNOwΌEXbkO]SA Bɉ9"*40HFEcŖw@\v񪍵(ۥ?~9xs4W@87yUV~eae,]O[1ܱ2i1G@(Z6VF̌nK D[O.QW=`Յ.!>=]-=wf4F5X0׿!>O`)׫y9f&Z;h"B%n]D^Jt7wtT@>mO@yIKE8~<䭊7qn^ %^~tŏz\K`d/ZՑ0]oQ5(W[q`UG[MqSz6R/J$y%{n6~ee R$2 8w|lSF!- %wd/h 5BXS >+ -(Q+RnJ1*`B)+ pf(ۻ /oϗt]>] SRΖw|bL7K=7.NI%R*;wYعswRX5޵65+Þ`ɺj>Peu읐80e;ɅȉCjwHbri]#kqzq\MJ`KWk61"tW&^AHjW!ĨV#bߖ3ME[D/mΚ[3jPm-ŨzYݚ#ı)\lSPI k zkfV/K'9:ʛv`,7aH1ۉ8M x4U`l}A ŕ`l=il&%]OAlVV);ݣ&r۷ MxOq ?Bf`˖sU)أT܉j[g@"S=x B~6w t~>(mՔ[0x3Kodֻ֡R;$hy~oN9)*jI4w;wc20(!ad~w؋dj{GviY Iry2:p갻V:VeϰS2d⪹v=hvھc݃׸?2~m־gy>ڗ?zoh= q ԅvpzŇv7pC*X'2  g!{'񧑢z/>uߋO?G>8ϋǿgم1O'.,4{QjJdz;vtzrI'5 [R%|OWsj8vہ݉:=ݨv/Ff_k6)dv{'ZsSfDnD)xKpJ=QE5&oruץ?ɛ~d{pro]׉I I )ퟦ.<}ۉ:0zSӁ#(uxvJs>ax{)ǧQY_ğH}u?zmR~x=_q=2{?!oLL鹉r^!K7(C?k^ A'YN3M7iIO]$ QE̪m<3iU'r0*G?F9y'F{.)HfƧFH+&h|B2߷_ oD4cg-balHϢ&#qkʰnnb3& gyl( N TC;Z\%mߢ~~e_]$+XI] )|Jvv[ (,, VWvU)jˌM\WB<T:Dǒ\o>eppo qC _.Ve{^|+rK>ih#<{{p 8Awvw MHG;G᪽\%#pQi9%. ʙ0A-pt (9"M8~@ցM\-Ճ]kOA%1c[0Uy+xqNuKzw^{S4?BY 1#7:Lk@/;j+,ZHddq^wޒm bZά6SEuŅWjxᵁH[U݀$>˶J\Z-L5AwۃCw iy#qv7k/fz 3]3:v%`u޹uӻ?4}%ӓ54 zM\/Z>M˿vm!#'H[e U7?uJ/KRF%6N8ZU6zqmFm3fm6i#ܬ ,NXnfJnm1*RH5m㞟nn Te5+bt՚kmp+ ~UgVB(T;RisaׁgJ*ZF3Qorˋ]eurE|v@ϧ 9#_|1j,ۿB>7{bhwԟ!߁-qep-+k*vd;k@pn_Do&GJ\?_Ö׮ @kV)Hn (jr7@Wݿ!ϨdJ;j,S XAӽodN ZR_rGaݮ%4òREVHNʞ{˹l´$坓V7-$Y5r92=N!uXOL Q`˭q`.[#)5קQcK)=i)dgZZ,y}d֦@2VK% j$xj5G9mKRI"n2@4- jv. ,qc(8xkʎU~X~gǴ)_fNIor*dYF:r:^kmu-Ƙ䦗al|F27F/6ʭe i=H 3 :6X:~7wU,aˋ DZ(Nw;)GPgV V8~k6=Jy~˔b#%C"Y &PN1Pd&[d1.r~El`,CE粐Jd/HwL/0pC&P1G9/L!#4t  )BFE( ! N!Mb3Eyk$%|kom7c?-巶-τ+=!"Ʒ9[޳M68x“x@>Q7a !ss[[)t:uoB`ͥ($1-6^Fnb[`}^SEOL Fq+,A<g H0C5(Md"Dsĺ@=t\#˦PIz؎ .%GogX_v׿@c6dPOYVȲFo,k=akX ^KuvqM&%f',<æHU˭s|Ա:T0itpz,7Vd"Zgo Uǭ@JO\`\qjB}IZPn! Q+ڔiu9_( hcʖF+X׳ V) &ܫJ1tZJY6Nr.4.L f74g`8˦q)ZK-U;vmJL'̱(,DI=s!D̟/ߑ܄iLW'T,6$ ˍM˜-tb_R?9ׁkK],J❚4Z6Æ?R˞ztϫ$ieߢ}H >Ձ }f؉'w* z貿m`V301by"md^dc~;^..X Ox s,/9Qy Ԃ v\F(4Uf}: Nvn',4a.BN)CĩjIis^Qw 9;%L>\c{Vrrѭ˦]BaK~u:u#Dʐː 7YP:3Yd,jGHRpH C.ԾRk RzleSфz[Ih̊>y6Is̳8w1m]\[rH?%OI>h{7\R*:caqk45]Fs/jy !12Ha†΅7 ]%NyTNxô_ aoX3B9淾p*EaE 1΅dVY{wV!Di3k0E ? ne94Tj0r0ӧ 1#9|6rUw{at-3o4ȲUm6DC%;ʴѠ*7ezV O|MQEym$NFZQe.n=8&w Yidވt:Uw;[ A Mm#6h% Q"U1ituE[iLz/Fj4cL }X%{o{KcU@=c!θPSSjobS/k@- S^U4WUJZ5t 5wIY-T~-m 45D ̺zYwÃYw#af9y2+%_Wx Q䴷Ƃw'4xvNu!g6>d}ȵw4S7OR?'kbX4Oΐ52ڬNXkYkU42U'x޻6&oV}}|UWV-~ ingXyZ4#[+q]y0=H>Y ,3 \=->WS:ks-)Jbw,2*(>YF]*3u(J1xMmcxJNeZ8GѩX=.:C5ͲPxyUeg5Zk-$s@ ({R<3I|h}3#^ݵj0nkɴd pw?7{ T[C:wӲ8!SByTp.xK -qI H}<%$}֢O nܣ^p|ħo4EѾM[;JM$a Rx< ЩLJTbnU6]"k9jnbY . 
SHayk3I)RTKOO_0ļBް߅<^z1T@*cRiSDV-TXRZ*icvCT%˫o45Ɉ#Rqd?kVq RdK&?+?CU1)тgDB(dQQ -arĠ*-eLZogzeC̫eڣ@+[MvVviN[ՕKU_4=+MMTW,:Nˌ}d[谬.wxL}:/VV<%FÚ.Y3:x`5Z.`5?7Za$ץ~c0 zW#'sC +n$[rDdLpRo1r?I!pE]*chc%NU@Μcb>M?5|He!o`y-'{p⨐W!qiCN3e&ܴ9z«%^ 8A  lcyS.`TeX٦$1P|> } CXfHXU/x&"qۀl.5*,rFK`@np>1yslxv`>9hgݎ=tҌ*r9]kB_-}BW/9j|V T<9Es>e+gZb6q蓌K,н= 3(c7i)XXΉn$+AyXplfqcFtgր8JC=1HJu0[/b'&}SW>XPX?R7+8X1JL?~8yĎÃ.;YZ?LP" &7l>L;;.u*3AeѝĀ͛:prC!YoQ˛ho+T2!T|FXۀW=[ddc7%9Z;@d?No͍X#xGw"|;SmX(%6v\ M .}-J>m #]eR3E;W>F'EO7*S:=KD6ShsE0û{֠o&pL1sO œ '9톆|r^P $ę5<^duv3utݮeQVVx QD̼Մn!,=|vW?M9þ^0 %U+STHg˔ .#_~oJh{˝VNt w rAZlbi\CSMLO^P?!^zى}q0~RuӜx>e`Lw;KKH wsng ~5|b㋗AxM *ӱ ?Ye4:z?M1zfЏ`BF~4*S;) Oތv4Q(>_5Ols+M@-Etd:>i9 72GܶPsL4Oŷ[VTrj#,0.B&sVr:Z b΢v &&O(%X*,g%u1|A^nt@lWzHLGxщf(b`cdD`ec 5 +H ЊdS$5 xt:\yw B>"Ն>K 5iᗔwSΰHkS{ÉQE,,5(Ȍ#**3|{gRc$,pq-Éǝ]QfyqQ}c, /1bTt^\&$DR8:[<7EryyqTk4N /xNDQ0vCDdg6޽=Ɠp|J]N1ewn]?HN% *Ҍ@uFW7'D2F4I7fS)Hn `H4%H`-p{=O Lx3v/*7zFyM谆Ĕ )iun齐jOӇS@Id@չ7!U;@蓎5Hl Wdse>Dsg`ޓӈj) û*ù?wn}d{>[ĕUg#a[>QhÛ!=.n)I+ i3,fÙ3 0Vja" ,GW GDy!Q`c E-C|oP -YO҈gsp9bbe!P!XZ9`a[xK-@Z0#5[4uS^&\d[9Kr,Y__]|17EDNH2!xcqF`dUQƖ8z5sX2EuL$ӠDd3B@ &ڍ]mR7hčV:(e(E Xb4hKz)opޭ "E @i3;$|%:[iN"tLdY1v^D gV4Fp)8./j!ժ{e!F0?PdYb}s")`61;Y0G@MDE8[0o?J.&gxM>eF-\)B^z][,2U~U_[u`Uh]In]ˣ jyra78T};128O(Ne"vGL|}Äl1:T9^Ff ckTA9wRsrDFHbrK8,nCDrC5 01 e)#V)JZׂO+.*Yy0ƼrȈ U)A~F(F~[ϱ+A \ܳ b3$: ڢ6JV;wh6V3˞ݻ3\)2*#lDp%;1.~Uc聙7zq)yd+_ɴ3kC0 NkGzdgx7;F8@+2û!(yGwQ /Ӳ!q˝;"Ώo@&x@g~ů͒)*)RbAYQBS#"%g)cE URD`D{KZ*l.d-A/?Y'ûΣH'RWb9?76{1W*-a ƪ4cn-sF`4ZV64`XhWH}2w̴ƅtМ?>iۿpa'j +*S -NS/SIT |*SצS8&-fiΔ5%1 Z-0aDt,/(ḬQF5Rԑr^/Y¤Bh\EǒF Ȍ71md.c/;**S5B)P}<Dp-'4ΥG*лr(j!HSIp6Kd˛mƛ'@,C#9j3g/rYGZ%%Inpu;4qrv5c"8i*P"ifQ3j)AO\Œ*Ưk 5BI60@/ao:z]+?WpO?F4{]-maGjSIgjO>:}7Lt;n̲q#D 7>P4f){8GDzF=YU G_St` %1pLvJ!m'"?}:Z==>w82|ܯ*tUI(䪴lʝLhC-UzQ"\7/{:1 HUBԮqWNcV 7Sh{b{2䳧Ļ@5O}f7W4aU&7>-;{zǧBz_@hC퓛/u7_ &us֒}~N^/Y9zkQO_*rO{Z]*l<ҹ)'\,6\qdd!gnmʓDʁP{ Au;3037_7}G|J{T737?G ۰37F6N]4hV8(FbP&\ք#3ɐ߻Վ&?s4U }Y߀}^ XJM^9<)9hTc2Z)/垟 G+tūØqB:NQQ>NCW_&z1= |g%d[C N ̼+|[\N tAchK ;hYʒcjY qnτz՟bape9NVe=8K%L!ר5\+x!P pM([  IE_v @ۀ#MQɆC.ƨlpmXµ!-T Ti !6Ϧ)Q?% 1°̨]^IASCÌKHfl9U)OHsd r"KjMii}9>tߝ x5&aÉd+_iNzB19*nmjc}B@©D@957dyX)1z]zekzMwzyuk5oV,5|_yHl?R=+()F3jAe(_MV:7UT )Ro5?TVɣDUWOl+-/gKp){"a%dž C!߷dNUUA9ך5~/g_cZ6,tcӖ{՝ZPцcOKZ iB9шTqf RB9L 6#G6GQ!A Ud@SooBD%fsV6x٭)!1YS{ݚgtnmXșM fS\SBԝ9$\ɷ'ndÞ@Z-yHQ8ɆUDfnҕꄪh,+׍pLZ>iGMe8x#Fv =wYIq(U1:Nk8ňmp:m7HzTP_vPD1II!&S㜘 i4ɄWZ`SC"qJBSښnPCA Sq16*h!87*]Hv(_ W(/dd hW qQĨn Aj1)BP~ZN6U61D3?FCζd|R0%w; +1d%Q@}h=H;䈉)B\S l Pap5P S8bl0 RU@ {w#/XU=gC 7èw>A02=Db}DBI*>0=B ?bnai0N=԰ Oܓ +<lkfhC}RՏ$S}d_FA3Rg1#@i+,)QpYE$@9ևXTZم%󖁿T <`Nih6U)TFPq WI!VIXJay* PQi83ye=8=y5e0.NGndHrj:;EA](ag]WFSGY\ۜ*&ʜa9'gV*J\@Se)Yk5RK\ٖgB5M?fT5+Ѓ*‰wAEl i5*11:Y?%ԱLkmSHZ=0H5AR1BĮ1LeDR1h@d2wW]}}dSZtBNS✾?TѮR_pA.(f6p~fYiy6Mw+v+\8((Ptr |C^حB] ^DFv=pJ#CX%i* "GD0)ҩ3 y *I8gL+&L+iƱf yQi1|Rb5A:ٻ0J-1e=U)1&JVTь V(B92]JJS}n>? TJœR(겚)ˆAҍxkf>u+WӢ3C#r90SV%9 Ns3QNN>ib٘$p9縰*څ.N!3mO(Bq&~ro7\<`-/( .xyt*<0_6-?݃><\1Ԕ馤&I~7/^/r ?s{߆<>^ټ&@ C96揔0s>,]XҨPDlzX0t9PTk'%.T;B.z[9HU6ǨFq 4DF,b]m}O VSR VCfᚐ_R匧_y(O5hE.d$sK2ܪI׉FcІ*Mv8掘=x vŒCbqX*sJSLK46)1/(!pƀv[LJMn΂ε8Ta?0뫛+lM93"a'NIC=u@7W> {3GiU檁!$Rqkr ŗ,c8gpKm9su4ptk?Kw>eJ&rW@.Ȁ]u"{dϫQ,U/X(cP&#PfG^2qFhrVms T(S\ dҎLE=T!O|=wM|gmU;t=q J7Z@!n(XҨZ y$nGIK̯38HO穀9:3Yin z2?8ߒW}*ͦrvf?BLqޤݯ}x&ɰ|[N'n+d587f$~`(P&+%|/N6>}][oF+_p} `,ɞ<,N`)klK$LjIiE3"UUuc$!jV8 07SSS?{=ipRzn?/_햒:+Kc\ai{د'%Vwc/cǐB5Ä6F kj=9눀4Hr5zrs!VNp*%)uBY:tinkA5gݟtl:]ъ<%w_-ױl$odTT`@67rOIoȎ n :łW?,? ח%t66$RhGmf|/!CЊӰ!$$ΡK#iwm"uy>y- 2R2bTȈ. 
]6MgMy~On6H߮?\javngo0 .?")9"tfoe_H`Sp $"%:P>5ޥq"DD6`WY_^LqaJʀ4D6 *n¾ Pم&we69e97un!ňMqdygYn3>_WMjoWT{˖gjxCC{X+IR^䆣+Zy<뙄y7fzIg' @.iA_ [dk؞b^"M{H\Gέ:Vh9KUncbG4? =3䆱 jNa0Ku!=}`Fw!]]uqlҕ u!(pU*Z1;ʚ]!P9RΙ*>k(Ղ-t:gm[R+h!4NBax}ԵKҚZX- ݉jM`;uX+j^]: }NYioJIHq'Ȫk=P#m<p "-cuucts!yZU̇d.Vc=\r]|{|\=r {gX%Kukuz 1ٶgoXű޹5`~\Y2FwYN@ sMnt]zLwrLuqvJk!eFOrouHڼh$I 쭹cjƀ?݋\ h#J`00#u-x9R2sZd-{kb]"'wI8>jh] wѯw|IE̞g\2Eǒ>U#fKdFuh\eGѱS=90;Έ⡴tq!x(q Kgku]i iR1Oa?Vd >b)(9 u[wX8NPLkϰx~&'aRDeѯn :dÀO$R醡xo|N~p᧚4=2}pnXÀN|u4]MI}5ˣ2dJRtE^ʇtv0n֏ ¢2o~y|DΧv\͞'5LL Z1V2~3Zer@fyFo)ZŠu01pNՀΐ}Y]h΍#&gB+fHMfL2i J&r< meG{cֈlӴ`̾H X}-\k]1gW3&g"$G˵ce$7լ$`,)\/J8 T74җ2, 9Ε )[ԑkS$RIrUIy١4:۴\%-xͧ . +|Me9| Un]y;MgoIoKe3% |]%YbH L ǐ8}NtNk~qsԝ ̡^<=/~r߮JFyF~&Ow47aq<߮?\c]j&Kݙ8|nOH|#F'!0:P>Bx!"n<DdDPʧƻT5NhQ\"u#fh 0Rzɗ/_fBH'\BcH *r铀ӅVB!څuD[˱@b*D5/Cq2^ܡ7wؙbo4JShJP%OidA&7hڽ%o"byo TuQZ]%niKe2G3K!2ˈ5^d^zS0j#qJ #h76-or&Ŋ$|毋4or2sF5'`3Lgn\I JdL; 2ZL-Q_q~z=.tV iP~PLrLEN^0U7"靖]QNN۩ mCUq_G(졆С kzRߜ}!K"=dk}Kb JIMˮtLQ\IFHTq&R8Z1S4c:#ܠ.1h)YI|Rv%x8 1O-MRB;qlܙLqJ &[kka \i <0aM{@Ȓ$快;) OG}r'|'~+Q yƻ&Q 鲪5{7fL}c$=ㅲԥک=|US`W{^:ZEAnǧşI>ű  (oa9 vlV(ޛBC,[F"֟d\K⤣Cc=*}HowJ;lVF"3@~#|hҷ0r.!J1fcSA 'SThK!f/!H+aX*RR}љ;TcX@ 'tGO^ΌHq)T)4Up;]FJ!t0}f ΩhѰX͈-s225= C.5OQJ mjô{(f1rs4'_ <̿"7]B)=O7Ƨ^Db' y(;p$)}‹7A ގǂ$)~ `+ch'\nyjG2Cְ"mҩS妗N$LZ2oSK@K$ecwmwp2 tjAJfFcv'CV.tZ-pnVDBǃᚷvLbNcDi!TgE붎w qA狭L+zH"% ]ҝIR泄T0 ;keSg^l\nE18zrq7]Fg'ojFBԦi"HV}X8AȻ ZϰtyA/>URD<ТCX vM H6ԈHYܓz BbLtYݯ}WE^TJQ}g07^4-=>Rћ|wQ Fi"TUH<omHˡ!V:,/;`VF\;niDۻ{qhrwzZŦM)OwmLhGzcxь&iILsf2 ldI%%;',QdQ(ˉ,~]ũ/ ZbnIdRD d%(^HŨa7F DI Kb0ZaD׎3E#$Ĉ[j%=8@ (#.B)/_ׂkizRnr=3.|$Ԡ:>,N&땐S(hu|eC.Y TuP@-2t|*=342vQTTW mx!<ȼ;58Nh4taݐ+5BF[ b4c"×Syp2U!˾\Fݚ Mu<aQY$ͻ޽ۋ 4d#(qJJxp5w= t\.E-}?Mvu/Swq3Wӳt45Q#)yϤg#ϋWr<@z]2 ){Q6E0n˃p<"G{`hN r/ gyz "stߛwy6H lLK&QeJʦlPR+)87!kۓ95BQ)JL ᨗj9'Soa$by'o0@UM u5<ez?]\h۬[ 1w~+?0xfCWe7 =ؽB:_ ?Z|P13Zz Qjz/|\gAI1f,ʃqO=?}g~Y/@|)2e^a/m7~08~8v;8Io^v.e93`w8APzåׯ;7`ƿM:@/_7@Mٕw ozp?AM'V\Lx[+jU6 @i .*%aPC(\ ϲAV}ux:p& S,pu\X}*Y:8w<иFa9PJ*nRy˄7'劎LbJxձ> @=f4 ooaB"nV4RXȩ|u;[uA=KƕgMQHh\ +98(?!M?'0)7Lœ &G3-xEC.R+ͣepsU\4_{oUĵC8#kH/󒡱|;QKvOJ{߷]꾻/=z/8UĜX]Qbo5H^\#KxTLt{[Dn,K8 xB˚+ @]]>%j]Wl|V֌_|_¶VZÁk|3.9ݵ߁ | Ł'x0v!ކFrmjS?iXR=Vv.x뗫D~H Ï]ZH-鷕-e  L%ޕ9]9{{HUG#>9"o)c3ɮK4*U2 l'q:MR58ڛHE K#!:bH1")G}"[j'$7:Km{x4ÃW~4YNλ,fdЌa}etTnǥaV%>hHb5bDW'S, ~`q_"lcG{7e}ܟl q2 ",7>pv )X CW'kx{H9mSoR$6&\clWH/c֜;1e" ֆ&xk}a4Q唌[[m')߹H{i#F)}t-,Vn5n3, l}<ϟ 1'>)(7<8;63"&3)ixJ˒LM I5.b=ƘD{&oƍ߄m-enr ي½t,|U]xH:sHmF؀Qp#,0 CR([rqK  v91S7[-h3KJW)?o']wYA ysW[ AۊbhVYU(MR[K+0-QV090顜R23cK,!c W֏ a6]!Jlx\x՟ "4[!xs?#4Ž-onI4Ҹak8NyX͝@4њ;nm½(he\#Q01"`EX:[o~`cyu`JA֖/Ց[OΊl^iSU cYrkݿo6u϶Sԉ|GR*jU3UN!z$ߜG,߸~Ιb8 ,(ߕMF*'"]0aQђ[Q=R1:@<2::] @p)F`ɇ9e 29h^tO͋1(N=oA6<-&4q^.0a?;<7a/'Mڥy8i\HjsFr9\Uvf> % 2&^%GIbfTqi'"b++ ujULE΅(Q&ctb$ i-X<^☨E9p fD%s<(%BTr6h-3ZPC%k ! v,EBWbkkkkkUqگFtȉIF>N(t4;<3JNbc؁tR>v?LOw rtL1wLm1? {U>b7s]IS_xy t|i2KuO'8'0 Z1h7ׄ  !&g=d<]L,9ch;i K!—E#P ^G,b: EbP^8pӍrP#|l7|":VrbS+J, L%10 0Ǫ;8 d85lB"MHu[ZØ*CLf)d~^>>O< 魏,ɉ~g'?Ulpu*BejZL7.60*/NK;edJsk>k JwDXk# o{7-z=D6ܳUWR\  y&}nM"aTISx=es+<`u'w^*Z%Y%h@}A2ȉH1GSZH=FFDi$zfPHuV= 2`N J&m(S WY=! c1dooUw9YJTǧ}J_3\:已6  U^7Ї "#v@q6˿Y9"ܔ=_VHYDiWj^ = 1b?\|BpbEoRl MO w߂vAiݱO/"vO? 
5xotq=enF7.oBTKrޜ#շrmhe+Kzz^lIfvhB(:<:zˈוFV"3^*p8n[vŬVPcZRȧ(~|뱈A;-JNl+jg̈1Pj6=g~<-|Syo;<[Ͷ*SG$~޽vA0 C]t."XpR+`d1@Kv />mՌ/b۳Q5BH>eXB;t_^O YQekv:i/7C]8dg㎇2 }{`24ԦIY׶CjE;8Q2:5aj'PZN{5m9 o/B3w%2mC63u!zF+Y/GﺝH6^%Uʥ{5SvfFR_s^wв5zW/v$ҠaWkRi;1dymjאO>ř\}W]EL2tgSMB%^ HmguvvY^h>SVh>St3G?gpKs֣o[+έ壏-Ix ww\ί>:U>'`w24n9挈SnD'ӴuB6iA8JҢWQ^58M= lHf3ʹ9ƥG7B[h8=Z q=לiˇ[T߭v{zW osJ0t(j (U/ml# -_6a6bq%n_'O.Q?rq2 Yϛyg.[чذekAoGg#6.Z_3Gϱ/h/BROM*q'ȈhQOھs`@[| 111evu!t+Di$oB p6LOd3`KUrrR dp!wuI?L!JiVqjib[Ơ@pļZ'yh0ִ/ ZUP '+'`DH‰2.&L*ISm06eEZ 7fef9`JQf}4c hÀ1j~l,R$;/M̷\`*=DP* i8 n ᦂ4$$p~84'b7;Gk6+<2*l+N }R5e2u[]|Z濻n޾yJ 4ڤt.ziCOn0)r$a:?'m9XZ |My#璗ʍtFYv1BIZ%qWVe=)B6h4j7Jz48t2 ^V&1Z%g •0[}0YgB.2TLyC,%㩍 az9 KM.d0m/ԮF_P\Zn39E=!%[n*%pyzC߇j\ԁNH:;Jw:xȸyθy=Rt@ACd\J#*`ȐRMk 2G1,tA:}b#/})[!ג^K>#nyVQI,>>[͜wx?Y&.6/8tQ3dL^,y󯳷pa/GS;x6:s1$A} @h0("1)*1'-_hg]+7#S07K  qbeV{gMRnx;td6R9Iut+hEc֫BȀ5-<#ҖE9U/oB|GZ@~"@E=5mGZ4L毴Oo?JF 9*/ak#/C͇ ʾmJ쮚μ̓WMIFZa/]1=Tˣ|'CǑyMM%t째8H[0dS|A*uC;Fᦃ@ݘExp!GeLbPՋiaeiaEP7ƺyl Pp;CAPTTD8O(GdZ+Ǡhh'Jg5ЎgQͳ Fh$no>__n6HCzr@o~S@clY( rL0XHLH+~`~-  9v|%Ƣ+e%Q!)~CJ{8'F ÚjR1HWjIIӅ{/֔ ֍Pp nk#1YI5a\tqJ-F|BK$r੽O.Kབྷ4W2)j~4[֒4/`s g><4'e*BFe<̓TJDK"GRh=Yie+bcAWꙒWZj\))zV n1zWlLeXEyeO}0c9'sPK TN]ϙ{a:!ȜN7l .%~O|wxFN:7֒FR aUp"qRx-yhA}r1?1}A# [J7 zvd399$m`;_87ru?[s[,x<cVzoY|KQLQ-MÏ_*݋&^?N&7tʽ >VC_wmv W(h .`6^Q=q<gHj!^쳌KE}*ʙ?o!bR-n2HrY2U bm[c ?Ip~|n02"S /,zҗc вO}KGf}(j@u?R:GFJP ˎ&83K ?%Hz8%̗.߇_q 0>nNZ uv l@\ȁ?{f=i4yqPPدZCl3-S_I%%׃0.[sU'P">-e9oy0e6ö3tčo.q}QcjZttד>zDz.͹Q"-#1S<.Bz495Z55S,7|BFvN`7-]6ʋ}m$cڔ\:@/iZ 0[F0ͬh XCQjVR9Ln4W-iʥH#Wd$IiBŌ9oa ;_>|;N;oޕǗI#/(L0@I]7ۋL9$fA;W*mlLssv'&h"[V[OK-溃,dx'0 idDoΞA7{~Myt,ǂ!YV^ѧ*OK¸d;'HD|3M]\a4NMM?|w7zUd1į5OlE5~|3 R}mc&yzatD,G⯙r³>xg"MTۗͬgfJ)=5U8:WnlJȣV`5zNh/'J6wkݺWn{6qn0@8&  >V4:b+k'هdάn@-U[򲡟aRjyRʌ&jjN&/;ajy*aFО[ c{׸ϳs'2uӌq-@=}T)ᛧ_.Txqr1&{xwCmТ.~K\Ɠ%?Nόx!dKK4P鋻wDKtR{x~Sw]Mp\wS-]m2 Z)Gƌ_O?UM/^ݕH?tֈ8Hn8}x0t"%W+ҷS:5ڲ\3]L-(AHǕή?L/V~7҈-Ft sE59FҸHR=h5_Ǝ.|b~GPc[!snPλ|BѴLלlhoISWviY;uX!?AwWlz!<JS3i]{`㜘+/Μse]m\ϏAD=jBmO*dT >l~lՃr]fhQcqRC2@F$GWPSē-I? 2V6XI1=>:Íh и+ Ŗ]u5(=N04M'0Ĩ]Qns 9b( H%1!pAcĖ|@B{㸔$<@J4͊*'ɓ@9XGXj,^U Qαo~Ry/+MY_KYjjOX '|"j㼍LhXBk0x J+Q5v=ݵ+rt'wMhZoR(TY_7PњZ:*cjA1?bR (RZ9gӠecG+At锃dw~Yzgζk j׀,:ĶKo7'qoF8-1e$FrmV Fu`4v`yKmap_ 4+e:s ^9Mz\DvtևFb QDۀ+W@X0X6H'ǸuKV]SEHeb^!({#Qs&"k ;wF0K\q0c6ۗ"% : D4>ae, cM$6@YRb*;EVAvpTU.ac &!EXJ0 '4ʦn \SNQ5hzqJg4[}P'z-2dQ3L c&AsɢL h:ЅcjU2˷&/WPtlSU_oⴘ=QJ^NEcן[cRUeI>AU{F+}h}X qy4gBmnwɥ Gx1|s=_\+Q=X޸*jHM`3qQ7hdM\&JO{RC(P\Tuaw6lANqH F#}cz.30:t&K!: NxvI|˷dO\;0_zO]3n,/Ied6|ljigcCX5:xwu{Yk=uuHz clBէ@_7cPrt,^k@uS]؋|9o nUqN M9gع:ٽf9i8\2ts$sNTG3sUfa֏vv[0k?-ue@nޑC|)sHx'%{ vήtaJOq`䩛. fm2A<{Z!`@g,tX,fA˹|mU}tCھj"u“3vڃ%'_\5p(?оs+lAtz6֠-w W-yZ5ڂ^E3XA=p|:ɘr͌y~|C,X2ȷvr]ؿ7O7ot~moғ Rg|J_Ja+tFOސ1{]ػލRZvv?/ jSKk`P A4LvZV.s2+ BqFR۝Q!h}p2iJ ht3nh=Lvaй|׿84$DÍ]`E)[駰ہpgB,= )]i]8 &鹉>rsgx3<;~Ǜ)o{ N$xyO|/I`Q[pe;dm'ۡi  oL9|+Zu;pE<_Kmԧ`:)W/__5q}.r˫w'hrpmɔ'}o.>Y#v JC"WgOк\םםر]sp=յ$0ׯϳ#w;[e,=NnuDNۀia ;'#U|W y.{-}۰??$< 4/}kaǠNHHFH?vYךEy" Kr1jp׾_7ə|qgݻM>,F>]%k4,Kx2q2|Mkwk{=D侤# TkuzIޜr;/ T,BD A1 .MY !0f/ET"+ xzz}&;LS&mbP)&V3,8mo]~)Gq}4&\O&] i_/:θshg- D<̪4N[SBkhb E]Ogn(BHKN(( El𵯵 q'd}lI( Ad / Q~i/Z^gi4L*#*E\lFXD%B zEDh@uJQFdpUĚ(ӌdwjC%ң35v7خHg Њ"iddBV5f!=/d$a\xhɐL!a>7 +i6lL+h Ӈu^蝍gxMyAmM a><5t)LMkؓw-?Yp\AO0:4874E8>iIMwK\q+}ZGEtSEao\ɲ=.-{bwԛ3AO;+׍)Xeu:J-v.'c;QFT.Oܢ d%/Z;#PHV/CAװp=pY1fA\˕LbKHJ10&{  SޫӥB?{]w<((;&|y&\7LރCE+P ƭҺ[wέǦCLf w7˲ǤW[vPҼ&nmM7ceXFsһkgr21tƖ؄97ocY=.P([ DPb&4A1 R_HYLTA0Ts"n]fQ­B[AE٣oTe,~uc:RXdKB B%ÕnFR7c! 
mԵ(Lit9Ub8L+X=Rwt v8&DN}XVQմAg)HJ5SN)'٨,IMNԄr4'Y!!Hv6-TjVݭ$0H%rMĽ/=*vɰ߅GG Ay `yJSHtT>Fveyord~-j+٦GeG9[ +h<#@s"l9$j.ׇv's=0 $y*'%@|I+Dįw7{od۵}:Js5ݹ\/dk.Қև ra ESWH㒔PP>ߝJJSƘ$͘dPEE[+HSIpfTdyd$ZݩƔaL30<`j![gu*&3QIrprPE!^J9ru Fn|)\Ѱ~"L"X 2{Qo_/́S%O-oIqnz m,T@;̃-R6QX ʠQ0"beE_kf1by_RoULC0s)FJղd8y9t- ͂,թvJa=3sY@0"n{2;X5.&yۋ\!50suYk٫ɀl!>(:x+sVi4P0+?v  #[DЦ}AJ*nJx1GR=Jb ĂeӉVcG;(hk*cRXt~S0IdBLG`g?8pj(Wҽ3ٜj.w+@ LLc?rɩrNT!45\z* 'FUe<-,]*9s!h8orfEOA fk.0l9HyYA2iJO:R0_eR?$}bvp㈛HkJM@)59 eT(QŹ Y(9xZ=e`I!VhBRg DRr:"#F)n#-@J Y^neƧ?wCz9NҨtt]!QǴ~GkP7| 7p4>" ml KqlyZ9="A̓~FhݸR\ύBy#8֪(þDIɟB#Ɖ*۵#mg݈{ t<0ST9ԅƎUI-T*U.*GYmř97}(D!r[@)ߝp%D9]w#imL=Q_D!7M *\mx;Uým|,T W6HvtF++VÏHfQR']D\؂N&Ѫ'cmnmB32OBn^ 0 ȇ4a[Ƃe< W7JV[1I? Ɋ {S,bPkrZ6M 4IկpU 8𳐁VuRox*?Yk]\w82LΖKȱ=!o_pc ;[[fդW/ Vڂ,I ɜ.id~#2dşO*2E$&gؕ=s'\e=焾iN ͪg'_/-Z>u:Nh_#"տx!O׭utݺ<]<]7!؄,ǾP(* BR"PY#|,>֭?]Au.C`[NcVцZW#4[ 1FcXL&QfkԶF?wFVnH DTjre]u2$qVg%DTBo?#X7{|YҺ |8T̃` 18!)[9ZNP3nB"{))%Q%1JksO8bQLcNSpql[-<"C +S.6̜DtC6ɿ46g62̥+YgeL#8H.7F;phCBV^ej5_Nt6VNB1cy$vn+;ж:q>*Z% 孶22{LUXn`"r0iD ҐKI2 ª[jC#VB!0ֆ7rnPѸ]=]rV׺wț#L DpFitOp9^v -L^rGץSE`EM-眦ҿxz@Je[Ż[P#nRk3u!EX\ݕm֊lVIPdcs4v!9W D  Pt[dVF+1L-bQe(ʜ p91lb~/Xϯ.jAوR эM% #$a*nS? -j8ΉhRiFϣqv{}@fV_Ze/] 4ݰcty~,\=rЀ ү 0sWt"wK _#R/4Buwj&Qz.@[e[wƃĔ098Ev~Q`B3{j] |~OwCxf{r?׮z9dՎ)^hN?=ltƞy'FΨW[ZJκ!e'WV)6S` 3r'Gj21v9JkaXd罳NoCRSv(nY}fmvJT4?⹝~/lPN{kkՇU}o/纂g fgOw \B;Ҝq9$}_SU ]Uгvq8{c Gy; r X3-'T[&jf[GM[ut #^_HuыMƚ^N\xfU6hy>mim0Q[{"(6WfNcM'L!燪;5}k%P=urCH^0CŠnATFró٧?R$`H|@T"a" ԥ,&3PJQN4ՂH)OLd/Kd .PyuAq*H>Nǩ \V [97HH \q)'RY,x /jG 1He ?B43*JU:HmKU%EkSyJ3y3y`&@!lgK-|DDLQC%Ҍzx=9i ͟^D niЫd&)NZ0%LRO?`W@Q@se/#qq<$+hܬѸu%\đG>@Lš6==[0Dh3賌')`ni-~4Pj< ]!# O-o!Ƿ9xxǠ!/: O5oSBbcNu{p3:쑴cMQ1"RpY] %;[76QCTq{̐:ZggHFj2ʐeu$:9=m[IKȭ}xNDww]91\,k"5 K²үMǥa|nV}b? '_7a8 ̠h0ï+:%+QV2g`D:έ2ƌmFR`؉|{We~V'v,g1?DR7-,jՠ]0qsYTFRO[+῾OKug7[܋xxF:z5m!h]l'["vp'&u"BG9#>Ti ]LokVv{`աm^Lg"fΙꉶɋz`Y ;gFm$P= Gf:f-KϘ; sDT`g*ϙ%i c4+XΘ'+YC+Չjt]fyg$jGw = mJ[dz%;aAc VUl8B*FhFų%C'YXU.ҿw1z HGxu/?h/*pg xYGxh;^.׻= Up~JqG%Wh oҽݭ߷7'}CC%33IJ0I1.zYK2.S\֎@ݶ If~z P2^&@qJx=;"Zɖw.ťx|O%:Tnt;4Q*P3c$Wo}~tU &ϸVvC{$ݟ8Ugv`>sr3t)6L8E#j}d& 2ghH9uhVvelb]`DB.2+ +%xjbMj9%Y.pNiUj}L}Us+)b`ZBIC2$UL8^h', R!<+e0cGO ]@leR=4? \0eHwХ`g2nRf/EjRf/EK 6mJ)hqņ ֊b.<̷` $Iy+9ӆo FR cQ%ᙕZgܔx0>רּX1,K R`4 1[$eZ[ ;.ȱ]RlBhx=j̀1BX'Z` M f^c-E,(%.,2OhY]u#Q;AT3z 7QyT66z>>6l㴜b4l;րM`q0q&߳X%$tJAtA[%G9vԕTL2$ %b :3GXTt 3sBғ̢N|ޛvIa)P{ZZ:;iF0YI^ Y:a `Q*m368aL溳:oәοig=tڣDqğ?[$L$J[ bWG+UբsCGrTCԪ|Y¶%1p"F :^6H4|V@lm#]5@h9"jF+uPh .xlJݲr·Pڊ"gçXjxLz sb=#qq8JXRX,n $;+^Y[^w=u3aGۺz߯l=D>><%"J->@>҃_'7H 3gR9_]d6_d>g;k3_N_S-mӷG7W?`%RIwp_?f?Ŋg*hWG$S&`qK"HLH94*PPdr2̡dU*D_rKV-B*{*NDjR`JFJk/ J1ud?~Mβ$ j)%IQ"`cN(bPlD%`ȒTd)H%I 5%i=$ΔHgML\)KUUtFM$I]I7rC?7`s~o|YK7Ofr:ݦòkC>?U#-9 -,H_`,vRSq*7Ne2p'# =U!cpQp /۟QcB?|>-чvYƒE4}я#XS!6,UE ˷93bk(hh9z׬J{+fq^&Ξfיk&*-b5B@.%orj;pΝqw7&cBIgt;+2oWb7.hκ/UvK\>Qr )&v66|;3GT/i5 6{nj95s[nj5SKmE%HfBT|&)RLU6ݓ!!z͉8b5cҔV@ o.F?."A̗~~쎦չV2*4zO,hׂZ[!I##BID RZ:z<>ڒNo]p4/eΟ m:F+mx'0k ׍!9;8RN7]\R0g;nKjNN)2-y z |hM%h + gzH_!ؙ-*C0h`g a!defIDS"%/+d-ðE/"##22"y 2EieRaXUF!˅'/- g T8H`Ӥ ʜKDܕDU#ƵZgk6a"l14˸eN9.$\ˆ,gq'2j6%fDKܖ'.|qn+q.4Frj qH@KYb6hyG!DUw{D}\vo=޻ة:]Y )гwΨ;dt~ڤ-={fi-D01%O>.&u覊}!7{s9MmZ.|d5|b#~:>$B:'MuOo34.D1NJ+:k92`n]xeq _̯./1 7hRKgܥY./S˿Lu_.ndƃ}=$& ? 
Es:#Xh;`Okz )UeV$᷇)5("pBjW4ڄx@30۔:S:t6.S)U eKYZasn> "8]`?R|L!XWB1kD]]hRCwn!eJO1hyG!\t.PIdK-٨ 9c!J׫σ՝30쯗(EdVH2!Na\~UNٞiwY8 X{t'gw3E=ZbD#118d" Ť6}Q^f5dCZhww&8/(,"F]'Bޮ<*8w=1;욘g울59)Dc@F}܀!вXnu~ ȸk#TXwduɯހdB^79s{A]^G1KA&@~4ǯQhyG!>_yoHj>S;]ppwJ%W@;^Ӣ.ׅf(f|9Ιy 1W9nQ2 ʈ1\1S]h˴9RۋҖBRi:_{tWH: G9OOp 9I$# iB#$9zkQ(ɸm}~.ǂj}ŮD&ˬY:i`8!XqĕeRZE|ƄP8:E1%J4Ti*%2o0)0yh"A"dX&j̔(5$ıpR Q{tu'eOk cL-KF~ò{RmBcw3c^3z1"@TnE£,LYE,v (^]^JLBE&Hk(ujB4.6Bc2D%(*KHC.5Eu`VRUvOS AR91>{\X0vlԔaP K˵4U/bWFQ<*sjd#s6+Qy* Q6WRa-a=K4~9jzM*0UZpraD3|.4C]G@y$ġyCAph4XKQ<)PQאMAeZW j*Dhg Uh&J7Z@+{p(\A*R! Jz=u("zgs%-!dɃ@ mi!{BCŒ%)dtaT4RnoR SAP߸yԁcg7v{qTɜȶjX9;j{o'S֣jEB`5k۷/u].#ani?dH>?NnuJo(l-C?L?z0jx/-/{4,orldܑ@E>Bkoæ1߼_ |><"vR`W\#s^=F)ZQ'i$J6iEptN4CMde޶(Ŭƨm]q-Qlj:P6:Jm}F.WcT 0Kc0,IΑU,YU=P{T=[a7748~iŭ@ө)V?Z|̛>.&ecoبPބpHz\*pxA6> iD&/./RUfJT54DlvwuH~JN/2HFٗ8`w/QdDH%K쳔faXrO)/ϯbI ]b`20_b<8>m=]r!%$)4c,MJ!"@a^7E8GT18ӻxm|dXƕf/㥌Wyc]%Z,p&ɫyRƊW[2c'Ŷ#q\xd(iJuNTfŸJ3 y5u՘j39BĂFc8Ogb~ 8rX"tRCOuh>_A#dӟC3q_\\8G?LR?_ @[&O'op68-8\?4VUR7w8I!h#ZNL:haAy竨 d⑒Ƈt!,LF~V+G] w~Rj`:ޭ2z_B?<.$n4>CKq{ Q4{BUoF<U 3ΌWLFSe9J)օs-q~;ecYL@RL>qh_oeցK;:g~N֎Rz l[Zן5n09zDKޕ/{$t~-ۛ̑,ԛw-B'=bCDpܬ_{Z2ºUŰuq߄B!+4^;~ҿ !ҿ !bH%ħZ+3Д߂KAtf3Z#a"ϕL:gχW74d㶫}C^,xh!t!ofUxqwK\Wf*_8;ǴD^<c^uk C?ǔN?1ά:[e0S٪_hr3VZ)=H8M=R`e_C?όV;d )L`Ôs4p, UXjEfϒl0fSbٲQ]du)Hd yzѮ;aγ-|ح҆f{Dĝ(ޮSP/cJ*mkG"[2@w"2C''Tw/@'ZntZgp j!];k5 稯%ʣޫq_ )Qp(m[5k>"Onj;ϠeEr?^K֡-//BY(T<'ZZf6xU%ŷxEkoX%kKjNM@Wz0Է7ꮄQ4UqH4nWM6**)'mMhJp8~qbM'ovU ƈʖF}Fo 0(T4-#m7%,/sy~lGoބtx ]_8K=G3/H*1]]'+ps_Q]B23$#&a"s!Z$T.U.eLzJj0gk#*!Oa(TJ85 2GS+6{e_J-"hBNfA+)Cl Q Ou&p`fܱTc+(5DR&B9"Gfx&b0C'[nCt87RU(4f~Z HNW\ lXcn9&w!CU(B6wBCC+q.`l%=oUqty!\R)Lװ_N&0CQ\y-!IY?vM{? V)˙jx\/_.B(l~uyѐ`W1z>4xz7\e+uỐB}=$xxPf9޵m$Eڳ}? qilѢh cI&ȖG ,IID٤|q 8$g3;;;3Ua >rUFeCaDb'38\2i j֐'jQZi+C9~`Q‚^04)H8 "r G¼)ғwf8u wlSe%e> R!ާK?v]:۱ m^ϙg+y1;aVdҧKO(ay8`UvTA(CBe7o>.40e~Ff1œˤ3^0vЎIH[q;UV0# Z|ل 2HPYdp$q҅,T`Hp!$.!lE.oz.8SN_AZ5,0t"# (Q1L;̶DzkdK]FTWICF@*$]7 jEU>D):u#y5by0Xg#[Y^R)Κ=Ѻ^PZJfNi8Mw~QP^g%^K}g|js!YL'HZލd8Fχ7@stIп= _g㷓?ad6 Ŀ&=i|z|5OᎿ]s|qkOgϮzqi Lq|_.=;嫗^]M3[?ӵO7_^q}?^n<6q LN^哯7gx _{0q ksܛLcYyy;r`bVWfti#_΂Ex<(tpI?k\6HJ3M"dZKs ͵u5I;. \@0@f[ڃhˎIJ針'h ?M'0a3t0Y>W= ;4QQlw!37$byYK8Hmt%!>O|DCj8_Nvχ@XOUQn~v?Ow~z)_y~/ոD .og/GIһ5 y#pzC̐9Hҏ~$|Lm [.&0,!x8_0fMm44 A;?ޒFg8{SH:tϗ`_#>V?u+%KԹᳬLϟcOi#iҩt/nk~uЕ DT7ukHNw8Š10PXH^^ѧ32OKbT0ʱat L;NOQX@'5;z-(6݅/4cxD}y~FgNN: 玡#gK<й(9'޼ySFLώ?82-0bFֲVQfB#RQN02A$BaIZ0zWZd\#L%cOnIK,s$߯oo"@LVT̶Jݘ e 2"Ί5'-f?@h5B͉pZԼQH>ԇ9"B` {em~2. 
q9c4yˑ &-U}lbu~]wս`\NQU`[l: 4ZTkE6)JyGcowbC%8 B+Hs  5hh1Um[d5;Й4 qtVoeŤrȧ8TΫsm'GgϽM(&+Ӭ'[η@[5DCRv.i.FS…-i˗-ه1%hR&mlED`b?{qľ2n {(@PGIz8P]$T]9 Jӽ(AKdǨA1S 拓PBu(ΒZ#)*+d=}M4Lp4F0rj̨pe%*j}Q婿7TO= `_~iaO&7rP2n_' Ɣ'}ӽ A_ -a7sXA!)լ'9U[_`, b8m>-~ KLYRcܷ BL)+ٗ"rql X~{^>މꇏ2*%aoudamVRlʬtwJ)ԺrݩVDe+WOZ[ѻY[;]EE:BdtV6jd0x>2n6ASύ271ze4Q~ʈ/T}GBЩx4/`jSJ@D.B]&9߂*IFlVϋTIR$J2ٸ9Tv8s$xڭ*ED;h9=OjMUOeUP!!߸V C57aUŠT}G%)Gi j:$J2%7aUŠT}GO=VRAV|"L M}+ύ͎.N6ird IP ]D5#(2ER $PD>y`]-"^.)miCz@Bpd8X{SBoy3u̎NŴ4Dj `瘶iZSЖٞW0+|c U=SY8nG]#:T77?9ztoǙplk owժ):JYݒ@n{/(?7RkFߟ{$׊|4o+]3ďC\rx+BHT?V@Ƙ3 ĒG{uq?T,͖%yf%bAWL5B3I&"@PGnlBA$ &|TDuZ+:b3{qZp"Nq3%J RTACGiQD꼰nl'b˴i26ŭuf6><'v&S P;\r*#Jz pqpP偺y)4ntRݼTj񜯲WCīr!0LÃL* D҆ɀcEY'e0aJ(p,P]s=Ut iEl"B#\ 8HkK   R\ #EVPlc%I&@Py|Nb ڭAȊ(q <嬽SQѠ$eJ3+!rD"Aw;^"xG5(UrϤh .fNW[ 5.Z$!b&e8%$jR rKC5"+ET֗af*s̶ZѪ]&H&̾}u>+-zˡ@$Aa_q(&9L]*R\Lji7ǰv>6GD3rPPEO|АD On 1!X\m<036v7@;D9QQ3w?~01+6M%凷_g;cř(tFNk58uѻgڰ̛;eⓣf|2'O$[I*$޻:_ ;/R<8rL hߕfTs>AhPؑX8GFV G5<ک[WRs, {0q I։Y4 9* 2"ZDF ca`4 ExZqn5N Gzhb01dV 0dȹXMj$U ڐ 6Y-F8- ) Rp`(ÅaߣHZ(E5Sh$xT5$jV2QRrIb YC$4E9Л(=h)!9i60dox_xkK[wYb|ҊY'ja;\<~%֓U+nFEWGO\⨳{`l-<7T`LL?a*w>O*ĩ̕Wu¼6[?sjc]yAZ@Ơoo a~'" |[d2Adqw CnX˲&L#J [ߴe)!6 wmq$W,Ey a5 ]yCZ6юwdIɬ{5Ł!UQ'"##2#`Fs(2ޏ6{ 07-fOMLV?mzvv>w=*Ѽ~Ǯu<9_oz }qoeo=Oo$0ŹV '& oR,{βG8{pl_U}|j>瘝v.W"r[~N@A74JhA7[ԿB(ÀRO4Q^BrS)X>nC4Pj>> 0e/DŽDθ&~uv{vzCvPB1/CP8Hb1vvT]6xtż'ǔbiܸD㹔$&'&g%*PjuԘ_Fb !H(a<RKU[=+p`,QUqk Q*QdMV7-GjBL`V#PYAJTkIQ0{Fr u _VH^V󯟿}`/xi8;|Et8dpH# tR jJƌB+SWz%xrt]ɖ (I@|O>`p{aǪV?zSq߻'W`mQNͻj+/.HmXW6շ/L,;:DWTD䶲,潐DBmhDMQC9X"4.8 )( xԍ &o0JO-~EXyGH;0E;sޅ>=ry>>Ƭh.*!뜹H4D/t&hBrwzF@3H4"bbLQL5+-tjd8{23:om#tOy_izX@b%ޕu{"Lq~/'". '_)t pj+PBV^1饀䛊kCV Zrn9)("q_ gJSRW]-½&z3",gX,6#dX߸]"-x!'d6P^~Yܥ LxB"N.p>!5u9/[yE/ә &#IV ַ)Ԝ^(Kt_ir"FԦPD3Ǒ-ec éfRZCaP}U9t-k8S|f-oI9b)|5PA3d-ȴw`piN ?x}}2}7OP v ;ZOq<~}B+|,%^'V{YM;tvכQ%3h?\D˧Am/<=Z~7>j]wG#w=\! @9c0xLDhqzcYEU4yl0<`\`0{U{RH$JI ;*y $Au29ϥ֞fFKVcqh9Ja)VP˴IZ㲭D2`ѻ7"1T;HS+&T$$5T!Zjez"|*H,FRY2%˩.:T}[- G-6͐呃(ۑn iB!حkE&#\bkBvV/Z=/(vhSmܵ{2 "e^]HlXsV /grp9eYҹg_tdn׻^0әmeБӉ3{,өiHN(,dX,bλ'W`t(\V (jm21Z۵{fUQ1lO#@ uOGDDs5] #lc-Ӈ43xSIjw,䰍R2ͦiB,Cֶa{!DN˔qC4"Jp+-  Wu5\r߶mZ᯦dݭB+c+]pWdѼEsGQj Eo$?;x:`^"I=JT2#/iFQ5]UEL^O+̌8s_iԎ`+PSl~#::tA҅Δ,@u/4l)YAX3h6c 2ַ|pb_]rR a|dr8o#08r_oPKܯ)`D cf,*4'ÐEլ=GYrd-^U/`2!2yү@Y9YțoId\u}# QYt~V l^ ՗K|5T/w1UF-so6u] vv﮾RtB+Jť` j4M Ss2 ³ Q,ICtLocu*W!#h,v5B[ast(`w4Efl,=O& ضe2KMӓ4*Yl]6ؐ' $$rs,uѻJJ4g1@Sĸ|d0v!O92uo\ 0{ P>fb<8Kg^BϜi'Ƣ{I5 Z Eb|$S2G"hzX p<[ }ר]^ܭ_/axBzӌy9][8u^ @k@z 5 j1tƴ5DvN?^}9>jб(=݅k9FI Jfl47+k}!Rv ĉPAenXEmc%@8G<]s)n15CW[/yBcQУ([Be4l ĥQ1E*9(G*Gb~}T+L4]:6sBDGe߼ϳPKp.^cJR=9xyg4@4B-bE\hE51*m! jC%D Qe.D фOdP(Ơ,DḬ SYq>Ptw uw?O($#ot_tu}e%URyg\e~' <4*F0F{KznEs\^|8r^_|x<]jEr]{-᝽wz|}'^/'{7\zf^]ǽAM-NyEQ-%*mAE8JY4 IѪC(Tb6PLjnQDt!\E"qy?Ld%+Ģk460PLKFG ŭd/WBp0 pbT'j(S(7!7^ &NK\A [ĉrDLi||67S'z=me>X|ttp%X2 )6_RR+LBo (ɋj9)qj93.̅cqD1-OjwY $/4#O>#Ra1(wzz~,^84GFмcinjU#ad,lQ\FL% /w$*Q=J&piz]I ȶ`$FHkm)PowmBvgBQHHMR\dͬ.]}'T.xRH y[Nr7N]y⏖*' #m(S][s6+*MhWafJL-Hcml٣89!6uMDJ2Lͯ/h4iL'Fd\Mxbe&z5ڤǿK`c`t{)ά>`^n$/i^9=((JhIw dҙđ'( %kSo0Q/y{*o˹%{UGׁXB%gpiޓ !<і3[iiĈYSP#wS=HRöfã¸U褬+4^fM] 5mƗHmF2MnAcw*J0u)SN_o̥R8'!}ʔ$>p+ &@[mˌj۔@>(>mt<^oIDfU$Cṡ\&9ˌSw8BeEŢ55;c_¨BRV`78/ ["saٟf07X`gZ]Qn/:|ެwHQ-;nO.7v46JiޝS跱ሠC"Bs!>DVʟ HVpc_j5Ԏ @ٝ?Cpt7b$U{O_x4ڷ F@ˮdAEO0*r?BNHP4| LKh>>zT9rQT$g<* $e9hg [0ǒ|5.1|4Nj ݞR<(^uT]qϑ)2SyKWZEzWUO݅ui0w\[AiA9Le9jاquM*5OlTZ1 M$u}l13'GRT{Hl$/Gs0কVTuhW4 )Ch2"if1)7Ć:6=c%.M  BW~9i6ӑr^:wwX͌ԗI7  8vH'BDdUwӚDv?=5%\X=D7^m[i&2q.r p|k_7R"!!I {eοi9j"ۓ`|vo%;PrZ;,|5{sKvk)BiΕJ>!!'d!6JF}%}ʧ|#3.`{I2!bcVg8|&+}y˚Q2Lˊv6WCs>ُԡQOf {@-%/TU!*m?ٙ*|Z=h8kZ/jV2G(a;X NWZQ&m{!C %2o -d"_Wh+ۄB'ϧ$7+:۾4&D?WH@33!C{ қy46d$7 *vbG|GhpP0 :g}&D*A\H V#IƵ֙Izi&7?) 
Efmb]>Md: "pk5͆_ٜɵM0\[ap.4UR( w %?|.Ke'/QIoG}7㍺ ZIQ.f"T&Bp*|X>8(JІ*NW_>(@ ܻk ADB:Xvg7 *њrҒCψ& QW6[*W4h㠽UA鄕tL,W8e*.*.򻋼kǙ*%sAhEiJX Lp&rXk"NR'b9_Oh_̱f`b;e&||` nXO%?\pCZ؀&94Qq(RRuJB?T^Bi͊X tJI"c8Ĉ )demu*Cfsl =tSM7?Bf/Fcc1Vٚ! MV #m47avׄE93\5kxA9cLaPAW(f&L!q; `j8[s/RuT hwD(& ~=KR 2dq;($1bSۅe1삄xz3G#,Nw~=܏/)9CFΨPF~:#wnσ߇ew/zC?9\F%2F=QPqfv, '"0p0Lbb5:5~4f{CsLL n>3X:u@˕DR/tS8sdK2)TRv@ T50 RBP\pI9Ѹʸ8'|Q9KUUb(X,eō04uJ` G΀jHA1th9`D7[ e0PE9@K<bC ͘xR"wP+ TK$ˬY Q,u<0R7#QCrkS0Ŗo Z̫'3$Ƹ{0sVgt-}i/ < +al4HpQ >钓: 78p~P+5 dV:PA"Rͳx0 ?\igw[vv˃˘Sw֣nBd: H!8!3F-LJ*- J@7ڥpܨk>[䵱nL7k+̱' PC)gq.nO4WK<X׻Px[agf% x#( ZD?ûiP@Fs"jl$]P"lS̈́-M!LڲѳxiPA@R]I8(hTkU4KVt@ƓH; ,hnTItrx A3yl?dn3Ɣ@_YB9CJ .䌢Ѣzţ$&9GIeY쬎`6zԕM 4^(e4A8(b0H"\.o(OB%PEyHGv\X4bj RQ4ЦQAC:9Q˛B0hB,<8iT+J o޹\`krƶv]l &:&E]DK CzKpAq:t}6-,oYoz.f=}~ bo7p7S|{L滼 yw~^bgFn޹Vb5 ?֫.ۖ& in~z粻 f+AXң+db.?K t2Tnqkua}ikwk4옅@Dlණ^=֗xAgdZ &WDD\0'Aվ L0Bd"w<%kf lyPh5V -NK>^ \q/>=3w_MSmTh֡ڀuQKUUu<[M|X>+rH1nF?ࣛe&q?gwܢL3km ⥅Lϗ$6s ]ݝ>B'd)7 'KZhr Qխ}.1X{O(v<|d/-:ƖxcyěhJ8|yq)6EwKFtL|wgZjoXC̏&2ď&j ԴփF'1qy8cZ|SCtBh)XeIFQLrI`\t7flcұŽ0;lw/>' Ai,G.9-vB[R(+)Na;b5 BzG#O⾊Ԭ%ߴ*:p8]-|__\puofVGj+8+=_ۣLKr?˓'$w=-/.`NώTLhxwm_F׳#{B#{;Az x{v05(5IDT<:YѰ-X"223*E5[ȩCZ\G%>gw}>SX$V=嬜Y-9sc=DY ا$Cd걧9ž[4>ڝКuԱw\jdbBF%Dȭq{& S!;`I(R =+7v%`/hb"f:ǣIcy"07Zّ[^?Fzd)Rfn, elt?425Qkq0}\޳+ۮѕs);lX; 8xq;Sn{O_KLU%Ag3J _0"eo,. {WuE 6<33IppM=È;G lqՎm@1Ҋl4&n nìY{ӍMyrmSꅒ]g/lVknjj#M䨦 Xj"9s|anl_ ;Fno>x,P=3c69P>rR8;2Zr#{˸( >! ch/f|a!x3 9?>:ṵJ){ܯƳbn;FwcYK:>Ad/:kys/ZzM_'g]Z#DyմUnpRO~Wi q%7\hVq5S(O %Gi'V$(FE?Þ>?'-1E/^K@Iek{ۓ%j ^ᐬS88 b-;"] 9Hh_ym::E`B rR:L[ ȝakʂ^ =^7g%V:pl4H{ \^e,^>K㜔^dze(0¡`X©PR3NTز2FslP Eg!lR&B Ұ-' *H a5B2>kA@QUpT-,t }l딛Sn[Nk1&J +8` la, sCt%([配Gpl@GENZK"#$`>8]b--pŖ ew̆ZCmC"@qNP)Y;?.n`-_P1Fb֯>Fڌ NYyuȋr~or.Um?1?) grMz|{l,n9fL=\k|H. Nn]]Ύ;֬#.Mn]Urk-3J>|HIv!,ʽ2g\S-8tQp\^`(mnGUǷ9NܠȻv~q_ »#\O{Bx1JL5Q"S Q[)PGي}yjCAK B :z,; 9Äs$pa;0nppL܏K㥍G8(T @8\l`? 3,\?no6J$Z_N/ F))4׏`?>_^V=_3ɿĔzp;`@Bѓσ2~+U lU$Ί %ҍmɸƣKkY_=zT]j z$t#\swXhZNvWT6w1*~O21ɍMQγ ce&TRku[@H!fpw]Ư 1uuiQK:piq- -w*%qܺm Z8Щ3qSщΌb{f*`gΎqaW9=Ú /M<U ^*@2F)ůmjETwWP?19JaHJ k#b꜀.{y`7H1LDb '3,v!&0#EqL<;T XMSvii{U8`QJM kf8ier;*(t;*RAB);rcڈƹ`֕}{ :Z0-ua Hzx=_0 bRF,Q^bI#;o5LQ4ԮGtl%FF%fh#j0*Fz,,[KD"Y][愔đDSU"ǂTPB hx I 6*_ B. O6P3'/SEVl,܇o㞛PKUYg]ҏtHmU nF>wjČ ;]X8^c!iYȸ{uEZԡ~4/Y8Z<^-!jeo/cB3zfz^71Wc*@J?=9<6$~n^yLPi!uұD?^*GNHIJ\XIq`2}Z7a~wxy$~a~X,n/..0:'Xs fޘ0w^N/2 _/Kًe&dn'7!/Rsˢ) ,bدfmCTՏ4WyZ[ wBRPZ"Uz@.MYg^Mܵ_*ZsTMRɥ{40<ݺYӇ,~\aGɗK :0`bJZ~czz'?ǫMlK[-r 1yǮc[a{vq7]{q~n7o6YZdhI~(ޅ=,K5}y ECVxΑrX4 ŽſOAӃG՜RyP4C36"ʑLƈj0Gxx!,#Es)-VW˱u9vŹ+1:QqGuO>T5B-mi帴ɐ녀 )Fv\3#,\E)q`*kQpsY%6AP%k]Zܨ͂Xrk'U1rJe!- !0X3oDsJ`%pZP%1KQ{ic5|:wY΄ygDJLҸ{$19t{lrVpiB"\Sk=Z"p%0! <מ-Vp1DXLZDQyhO>zEA =rb:i}Gj0BU*t$Zj,o(m0*vP~=p}Xh*񌅔 Z/fXBH:DD'dp!V%(cup-i]/ BoԘ+PQǂq -+qL#.(3@p R>|0 :]!vͲP񌧢Un3Rq$xze[XD-be B6qV%e: _)pMʿHM|~ǘ ՟}򻔲.,K));MALjz-0# wHrؓͷ"l${ȗ]|vl9!H/mYf fŮ~X,ʘi!GI濅w+02@Tے1S֙]oQ] aS/y4Wu7ݪM?|VԀ|}[ȱhLW**|R:cP2W/^:b_tzLzWWq{;Oy >JvJdˍ٣WgEXv?^ZOztr%fO6ſpΖbN\D[_xT_ǧ؛ӋswzӲ"V(߻Huf=|^KTgy>Aђ3&&CdhMU›C9z/e@y2p긂zI=C)eXZӮ t9O,XYbqgMkxM*M@yV&a%UXi-:)CIC>zN'^]o9 шoZl".osMOن{<81κE, 'PMVk)bKZNM|ω&a\OTf7GCl,wӮΪn5=T]o#EyJypgUfy|h[hg~CJ1# P'ML'i)(&d,VŔQ ^q(1ó~H-KUq5J6^|K̚Kf̒B$ElcDB̦ʂ&)E>gTxӵEߕKҬB*g5kk]{cmd4${hZ[QF|x92g)PFWMfs~6=?JGb40-N$R;:L_Ү$o2֩j IޣM>mO}hNݷ镵N}XWnl6T/j]ݺbPc:]ƻouJbJZDw)ΐTY]8om= WBZgJ9',Ofb[#f5]w̔p\55akah{[nSX8ŸJR7)z}ʿ[ZrS+" Wn'-G\ɔIAZq,kF+d{*LVWM \ C#%Ƽ@EQ)8Q̐C˼\9D8 N_6hOki:x[0y(h q"pA92dSH6Z'X^Xb y}ZuAdgG ˾>+~/Mْ;W^Bܓ_}j?=|\B|[>?n\Mi?o`ٗ0>kv!#+o../??Ο)߹O=|^hr Lj6U:l^,Wɗ6ļ21i޺C= b‰"TW)MD~Or. 
/g䈿*EP D㨡sT)}ǴŧC?I1W*a!]̗Yrw79B__W|~} ; av$~tW&ڻӋ痧7>+i^ыAikư*d专ܯ+Osw1ڋmd+i%VU*V<G3I;q v?>[qܷlPsqRU$trE).*e']I<ynj՞/2F^BC7g/|c^}CZJhJ^EMߺwk-{,JoG[Ş՞cC~~BмOӿ_huc Cb]Q4#.HiPi+g$-U#E:l RNHS9 (xQ 584]@=-lK-/ȪӱBPp͆?stg%LhTt[}Oe6!fF*P/8A}kmacfxsF~?]A5{E|Z=QѨb8k7z x j*܉{LKB[Mzû,fۡ`H폀<$&(XݣF߃AnR FU2]ZcXDYZs;;W4sE0Fvؤf̐anR;Rv>Yj @Qֿ.!r-yτJH('{i7]pdlyv8}`O?v5Lәrbg c &|,/DL Nh0>g ɛ2/]/YMb Ρo^+ L3g%輡ifR '\Ʃc؛ht'(LK>wO:TV@qfYL}jeSDn6@CjD{MQG$赾%P*x[HjUq$Uc.@mI!Fdh꾎m Im1L TV*:aϑ% ʨ-ƻzA1H||*4HM:")YFzgeVfYs70(fwTD@F"Ѥ:^zhp*(`WsuuQ'7݂ ɘjFa{VQkz3BYR eBx`'LCtW]Z_V fgLQȪ]~PJ?qj2zJàb.td$M&a߼VN4EEJ}GZ5TfU ί>46U-(!' 7O@ )oI5yL VzPNh;wjԇH[AgEgEgEgM6(eDLH BXI LPxyӖs|7ꔯ;[6Z> n]#f4m(i!&饙H\4 1<&oo$ dfx($1)2f"QD^Umej]Rr/:wtf/l pbA.YOx3$ޖ|ozyo]9a@%j;`?ƈuT.qq:">>c<=L+ztm4tMаMIw`6rͥ+r }t*mBP}Kզxzm-= SSvsS}sn’9msQu?Հ:vu{roۓc3ʪG=mn%8RhuP ;7D<@GbvL<эd_WެIh#+驽&;}E&1&LI-g` |*ƉɅ=lxoДBebɫ`n6!g9b\NX26^gd%ɃACv7q6q^hVx $e.Π+wiV IDo 1k&DOTjc˫y: ~Xt_o(g2љ5!RPi+zF*|LFht7%5'B.ݪW#ۂ%>s5hHL2SjyQ͜V U[A8PAkR#+݌//rYx޳vQTUJなz(Bՙ :n'RRy`\v7P ut@RfLsL["䱤Ր7PJF跴۾2Ϯ;U_tU|<\.J/qMȨ'!ç)c\?w'w7K+uT?]^' ǿOtzq~}5[X *hVAL@FcUjhëPuU(B_hPDhXHk Goo³f/;loQ;SǢWڊ4-k1k;ث]ҥE1ꗓph}H,6B~G&N=}wxХ+\:*ڣ% 2bgj!7Yv 4%01T@N|8@_VV}uj6VNõ<d9 נ*>2KñI䃔ln\(D@}ކt^ 4-2,Veoa.0L@|Azjxv%^X9S蹓[$ `ĭfr<&[񨙜{w2Z1.PZRfL8F@-3mv{8B*:cuf1U,.(r.P=ZWbtAXQb _ 7"`+TaOqp/&!؇gw-6hNQ@S >$XE+ 0&\," #94El+"@c9p0֡V1wY@xTH| 1L5&KGwbj'8I[P6ۇ{jSDW%]* )28(#]2xUʨ?y] ADžb *ZB=Fm1@z$]'\>Lȓ1'0[%گ六SVQU3-&HFm5Bo@&0i4&$E}=Y ֦P=oW:/9}CܟD/oG6eYY/;bҼN`^5:_ :CtP":6;-$K[1hd pڵ@N-KN{W̿)h!'_|^QOej9CMG3S;N6-O=!W@/-< NqPMy8dmc߼#(8QJXN׾€FӄoXDM;؞nR2YPQrldCq :#V`h^(TJ<5x0 q2F(ɬZ+ 5V[W9V1DЍBn4HeMvΉtB%|J7|]]'; Pqf3" R055En`|ZtOwyM%&f,E,>qx [Sc??>}|:;?W>>it@bB褽A';_?B?>̺8?K3sAwnPp}On-q>z' ϤVh4ӓ_ͽCMQ\"bl:2MPqJhmFʸvɹæ6#/|v_TBhb2;+9jŏya.T`d(̳@1C|X+] ɂ{GKeu Y(;fh&xǪ(K/&_esnj1(_Z4 KĖu/uB+ ДTB^B(-M(]A-h#?&_=7PF%q e ||p=>֙ BufŇB2p[?qQ Ac z~={{Te(`Tn|5׮޵0WJɓFM+6]<7J1"P9=8" jj,h@#gD99/GEKͥ ]j(Ê#*T*-pzC=GqiQSKk75X ٮfȅT_'=bĸ=dψ MRmW6ekN1N;c}ys>ˤ!.P,'HQ'DY{zwk?nR-9%P6lo.]4Vnj ~fr_s }nntOӿA2'>NΣY:9'~u9wLpFVs뙛RZ8-UN׍|&ZeSB>2ZnEw{@VA>Ļ.M:Uyn]X7n56ũ}EVAXpYq)UotnX7nU6ZFk>Tޭө}Fw; 2 l0Vޭ Mtobd{m>Ӗ6_,՘wZWb1E[uLN[v̛+;s92\ט&ӴU_C˺@+iQXP'墯c6qE ՅRC淯5-ڭӕ뷺y߄~z7pB'N@hX(3jQQ-5aK]MK${c=Kg#bKiͩrXSE5~ ؞NOK_}]?zzeĿ'J hSLqJvr LypihV[-mdr%倭=tFѢ"wt p5^ |ri3 h3Ձ#hf3+dxvt|b,-ly!Ԁn,7p ꝲv!Q6<(zVIQLL,7燦R.XsTNџ^(5\H 4ROHI 6kK+޽c-o$>׍"YTҥn1%GnL8{4VpCF`j$9eīPk:-ml`}io}Kl7f l1T뷔 uhv-Pf龿7 #QTkuH)[ 1З{{*Srl#>;]ah+ɺ o9Bh(ԴE1^ [REPuH{dK4 DA\3lJݝ]uǵ$׋p?osR;|<2rc 1-wz"%SqaUz175=n] 01C) /S7(Ly#|'cQlwr% %q^5κ8%\Z"h ET[Ur:\y.=ЭPwKt.$.GB=8; G61痿zϚӼYszYsDy tb ob@^}W4BoI=csw?+f.ZM _,bۡB-iْH|&]>ܞ~;wQL u{wyѼ[<]20Alt:U kpB7*8acic(hPs%%'h!ox|۹hW]ț@6u#iH T+Mt:SQDy\ ]#5'0M@P-_ƣύd bpQFcV )UƠ5 $o(HѨkmj`>U921V!l"SLq,u@Q4Z!L;,ShN4c)P2rw>Z1@;b]@2zTQkH3$qa|3fD8(ЈFU^k2Rg!= VL4F+!Et#/TBKK#*4* TA&5t&Z,zt݊jApavM012)kUMT1G_ iE|ƎrRҝJ[yIgNbu{q}>9W+Kd5+CIv@P8b'LQ'c"C̷qdLp*=DT-ibls8ZD-kgOe6w5s)r&f-j^j*bF-(ጃ)4|~ `0F'eD3Гˆ:l:e /`鍊\8x#R S2NmR\Xr)Ko>ԔVRkuJe*+r<Y@oyƘ!הh{5ʹgMGe];7:LU+ld6ί^le-s btTXQ{`?\eޥv!J1iM8&m~j?|hmqVV7 mN;vt&*TB~G";*;:/4F:0sBLh&&-׍zפ};)h$ SjNX`)m keة,R~G"N]O.ӗe#J\_NJSBdQ #0A#bY‡\xT#EqD-[2P.uFh|&qE%`_vAoד[>F?{WɑJ/z]dfb@#y y wGV7".RH3<+#xf3GVl빾}`$RPk 㫍NWh D۝)j1F\`fC<$Bw a`s5hS!b9Θ-/YCҵHDLZye*3Xs6ieF_UHVZEVgs!8\RZUhF2PyΕ2W*l٩Dy4=:IKN_(/f-_ģS;~tu,2߇ex||0ab*,`Z/qOw磭Tiج(EksXԒEL;nOĂ:QՠLQ[ye)*(#[}}d] caܙ)h-]M_OOl~КFwG5>݆!nw~*fw%(o-IMswgM&yxވ! 
}EO לV^Ń8H>+T0u&\є~#9MvZVT RQ_E>E=~-srvf/<o"U{orr#[IerXCL ^JȞE-QKCQvESRw =j|*H.gbb >XD{׳xenwf;F$3 yrir=vsC JKJq5~fLׇ"u1P2 gBBf V|Z.Sܬ(Lo\M`q5jf xlC={tupd&:q9T{ )c-ra|izD/*"؁AQKF6Fm5I^K++]ԶͺUɊO<+K M:pgtw⋙^Lb(JIAg7zy?"˧0z;DT :޽ngŃsՋXܳS\ K2d7EzN<2ɌQ3[D`邐<{IKܴVv@^SՑxy%σ/Eg99Ev,+q]"б% zfZ񌗲RL 7%5(1;>&HIŀ;ѱ߻sXˈAjdǔ2@<:Gu~<WK)3n{aY7`gy宀T1굃 7k*DvԛF]IKO󘸔'(6eTaR.ղc,G߮O?ͪy??s%RY#؅ yȁ!dp}) ק(\µ3-Yp,Ps+j8 Zb-Jrjsob/H`?hɍ$@%αt6c)ぎcvayx,]@M?tr»8~,s "2 ?;`:L :oe_33Dǘ Lӣ۫?bH2V c$ϖ&w7}MncuvDKew!J`R^V=C%*l9jͮg5$kCB~"zLI%Sc:zjRNݚb":MQGp%[Mn_4U!!s=Z8HʖG,c?(5-BnLR6 צz"IjNKhjxeq33 Iѐ~moJIׅFm7;%W`i$Z;k{E{:۹MaM$\uKSc)dkf;gZ֍z + 9cf&cp2Ny|R%e]{M4T=uhj$t] Lº4{czlt> { {mE4bV]EZk8SH +P}=V:$V(fL7CoAquׄ_4`mn@uIa̗OSpOa>|0(9TRxxEy6~ ˿:r~]#>@}wJ=h8aR g}ha:*Dɩ5Ÿ4q$'9\Qvhd+)L`qTz>x짍EZhEw7.3_5n`\(^fϳ+؆/!JREieGu1XwZ],ha}Eu.1P9 bSsy&}wEytjvb5V(1aqaF #x@6m_ױU< 〈@2Er9&%{Dd=G# s%ԕI1ݕ-\ JJ n6 'eρځ'z2W-;v?^XG" vR='H1ǽh5tkV޸E]OjrJw/Su?mEf)/;|zHao:? RLZ{dӠδky<caogFA6OjqgX67a`4k k{Uʠ$#[`ڧٲ \cl9‰wx=PB{vٹ,h7z56}"[߯bE{ 0fGF;^Z~mw-Pa)IhbYĉVje/L$6aljv]:}G\pCJi_) 'ˉܿ;:hT0I/I_ZV&_#V6A|I9e%O0I@ޞ"{igoEխSzڌFHbY>之\&g urw9eW! D7eV'wBQE{1!L\KS[Ν${bKm3p{~BouGKAKx[DTx PQ;[ @,~Nw OO q Pz}v5RֻDy6Q(^zPlDAE_'W+˺`ٴ DbPۿ5Pc75*>m#2jTS 4`PJƐADgC#N)BRe2.U1JRF` ԃjPDa(HPv-J- G c JAX a gg@$Re i@x57rAb%N2 5& ZQtm"n6f It2a}Հ8tJ:k Ce d$qz:\P@G`~j[wVHPx8G79*JǼ3RL=߯gϫZCW"m9ˠ P<6:)5vyq\|l c:'qF᯴5 3>fn8C8ƀ(!{LlNj=aq[ݐ=Io\H: A/0UD>aXF>U0'kYWjc0i`€j؈Q&оE0ݷc$ u ?rH> R|p7?OkF=Sm[T)@'unW?a$D,2*GhKF{ Q&q~cV<Уa1&[iMqu0æqEw,l6 W1Ą7\4 niu)# u]Bo/C ϯA@sy0hK_M°}fF,X{2\DR@,[WJ /8/ON !&^@iϐg|ako3%%>%:O0 gj^L/(^iFߣ>b+蝈}v0C)C灆@"!GJKІC=*5 mPf/$ !l#H)&EhèޫG1ޚ-z!1Y޳S(r[`ݥJO1Hh_W `X#OuRZ I(8ߏ998R>֚nT\\kTxt(&87+DԦ%aͽgW^2)H"Ҭ}@Ƭgstť bKp&n>gjSIjN`@ٞ8`ađLR%72EDC҆aDN'ͷE:ƹ/8VTmZ gHe%e 'p3)OB(C2B)uaL,n2]ɮc5|{9nTl6:spGEZXKzH<(mtm{aH Τ@u tmmJ'+ͷ*ZZes]4뀠MkFy}RxăCH.aϘ"eȔ2IENYp21Pl/SQ؏ 2X!VKm#wyꊬ;cj#2dG|f=m2euv/O.͵=vKJ^nvٯ.$jcaZL?m7M4 mcc{ziVlZ&p6βeۣyNrǵu>g$gK^>gK)ez{6]8wf]r~+uTqE_}wnu1:un]#b{ݚ!WES A`B;&HA9 6-^ݼQ) e:5epfoG8sJ҃qh$G͉wVpb7u՟sk/Ȩ,/MX} du+>Q"0 2 b}ၶC5.kt PEPЀq^h )|,>6*t¤)qv N*:ymS! q+Wyrm=nmH`ZDyxKsٯm w"'YbG&*K!|v;T}:9̆m#.?tu2ŋÛ[ܸ_}go5OByy ŪqhT 'Iڧ4# Ƞs_ ~6-1]("8C c3• iX):#&K e64Д2Ҍ<ъjF25VAІ#͠LPM"$Kddf78ξ"#Fbd엑$vэ G<-.'=Xc(As|3B-.N.lzwY1!9oUa­6sCGHxƬ,({Q4ޢ м38p̊=NaOƼ} cBL)l*(d@ϓ/wO+>5/Yw`|S~ݷĬ&$vt}t|o{iˁDi`bBDf+SUʉ$iPޑ?v)M:R{, l5,]Vj9ԛڍWb>39R/Ǿa)*i&RR0mLJ31 .qdsBC总>z>Ìzuߍ#'z ץ<<脝W{}>_b6^hpE7F"m (5>.A&` ̭-u }1/ =Pp 9ۢB4Dٿ0[ }xݯfK5y< ξfb8Cwe;A;g(3Q<HmJ.*MR|$kYa@$ΓmȬEeS Y҅ &2)Xb2AG.[xB7@b;vG(̈1}|B,PrʆCd31r lfK6D"p&cf_l᷊\qNף9ݦE4{ 4`݇3m $Fp+"|h;"D1o5D[ ܉ ڦ~MzˇCCFvY>[Cp(^>ww0dE<'pRu̘1)[uٛʗ),^]a¸_5HW"z' Kѵj"N.0Jh ctȵF12IDl _E0pWQe9O[ʓO ݝGjvrZԁG(QpJx!|TZvj>bKXkմ sigAf*&ȫ^%e'{Ѩ/EyڴãI7L]{w_L^,8gƛw 5'Pds^YQ0ry묽4|ݳ6L}uZgɳP*{Fm?nm]8}Iexpm7/蟷\yErщ$zkj \LK7K+nhڏ=َ @~rZNXpFׇ)ڌb9"0Nec+ݯԯ>wPܚ̉![9*;nŋSj?j!`K( -:arA3c$0 `Yd1e&fBjI(#Vas=@`nCߖ/8fUjOcB_10}{x_lMwLml,w\/*ٻ8r$+_XL$~t7A. 
&/eI+^̿o,eݙJ$AShbQ&9-{rY,ơ/uMeXȡbW(]ר\f`CsmSǷ&XN[[ J\g4n盳dN-,nhitCs=ݧ ϒ0k,)&VطE0VE]뼯z*<4͝w߼g7꣯sW&Guuh'=&90T)su㴈 +Vrrh 5ȍl&5jN6t#m `vZCv 2&P;iR(h+++*0IPJYTdh$Qѳ2naWpj g_1vU%wV#M]4V90 TJ!D`#i=e@HR(QNZ"a FŖSe[WeI*Ixg幖A7fsY0A 4SªFɖW.qR RM1c2ڦCv䕗s}CKgW?O߅-ǧ|9Kz~nj﯀\SxEU^.ҳEqzv>s<bŹr"$Ɖ /ħHúq&&ٝo>=REZgaB lsTrT33ᒲ﬘ Jq0+{-٪&aߊ`z&u]I2ZoQѠ8:]Je%CWmQ|4ۓTcV9PN1mH>(6z4g N ZZkZ8 x oie@neSriӺh\Ҧ"i"< PFʦ5_J 2ńP$4+J:ڶ:QPAW…Hcbb@2XaOB$45ӞQ;pg:?tsghd~gP J '>A, Z8f3牥RfԀRs ϑP9y+mPݓbmL6Wliv duݑ γ< ZM3g⻇ui؁ #)]hֺ%״Asٓ7I\;OWW2sC>|$#2ay`, U~ҊW׻L]^_n!9RfZWmǛj''753qv$w_|'|o?WXz:/{ e{Aa>YoSp>]_k$Bͳ{pwk# TR&V0]EWg~rcN^ˇ !m;VEX##:f=aݤy,vy|w2gۼNs.b/4J/MeH\ʞXsQj'J/1cGJuT]7Fk2n%Jd-vC%xvJBtM%hO#"/뇁3 Wp ħJ^h$Qx+_YkyTrƱ9uN9%$M,8m sr5Nc%itB1 yW3 ăhUE$( t-)YY :F #R + *U\J)~ V8V%uaUb OtxaW(J[Jnũy<F+aPE# r}z Y&emV,!BDʼnl.{LߐӮoϞrvيϝԢI ۙv((~/2F׫L2M-@+Rqyh0qm hQȬBPy(։?jP|;L_D0Geof.++J,y9J(9T*0{t]8v۽`CR[O$ LO(k3"$ ۇ6ZFKǕgzO) 2J@ <$,,ؘ,c ~+.dO)b{P00;E7[g &њDY8F+Z>=8XQ3F#1X—'壇yjROf_{;;syȘٕ\cef n?hh1:O?bOp/1eY(A{Ƴ )ͥFv纽Rd|=ې2Bwmx|}mݺ6bjz4._xEf_UZ{6')&kvаCӌ64qmzPt3Pj5f44-2cHPlY<5WѣIvEfv/3]j^"7LrtO|g\&TYTA-=ma #(ކĚ@5+P3+dvs[wh_3P;eZpF{(G/:((%NvΞE>,WH\3穹 ouzNlr]Fˀ+nb9-& ŹG%["CbI1L3L! )T&&/ӪI]LGe)`)^!m%7譲" lEB9| ф5: A-"jnQ@vO1Ih 2Tӵ_iZvlМݮLjzGPZBhCC#dLQ()u-IՀVrr gOY륂ej3΋BGzEr#UWnnWF:mX'NP[CV`b3˦g- Ĉ\m5{01ѩ#Zn&˧W˦[E Mft$4ipH60 GmFЌdD1׭ O#Pt1'hv?J\8G^s9Π1X+M;4m'͢{Nn])#E1j,s"?v˔KӵQ]*'}j_U+LiN3%SLKO鄧=܆od܊Ff#z=َkE( jdggf d'H1^1'L'li [,ԝwj i6d1Ɲ/|g _6ࢃ_| ~xh9SDi-L jʘAi{gSriR\k_:e!Q 9B!jgcQ]$Ju4spfdziԶ]f$pǙd-rIBFp%:qW6JDU:+)J+)l ! zNT[6t!:⠿EPKAqaXFd*2$pHb[SI+!jfFk cPWI0)fjލZs!nn9Q誤J<*ZrUld5X]`22`Y>H4z"P. ED);©oaFӧY]yd.xd!C\{yR6X3V "k^My#ZK<-\:_pKv?uuK{т~^n5ߗxr4ALF>ûyૂyx{c-04#&V̼u. \8%8|7^^G\)ײ֮MS㫾yb)A7.&>O >f닽fF#{K~ѧѲhIHC';ܭ5?Om TS!~~M#=~4mL iO307H/3 r,$ 0dW[ʒj-}-5O"Y~ϩ*0b+ C,v HmxJL5y:f #D6w) ͅz@ H~qp;T9Ur)^||q3qu9Z~b^ %;G9iGj41v:&dXf߾z~] ~5y.GdCRW.E;h:o#t2#:ǙyBҍ7*;峦JYZn6v^(pҕǘyB $IC 1Pdm2;xB 2h|!{e'7J:!(Bq<R ٘@D) !g9uAYkyUs?o9Ͻ2KJݲAu(eaQV)8 wWX)M[zj^ѯgQ[wfF+uӭ}]%(r_# CGDS[CNPGH1m!W WfR2wȶJ[5ykpYW?sP >/~#z8ņjCfo++xv19iRu:ݨfT2-=C ^0 M0jrN/bDU>"JBQ;b !`(ʆ쀅 a,o0VkJh6Bx>]o{#*&!\,L, HyLVTʶi%Monf[no?o@,uH**c<;V={ZܕϏ8r|[ Zy0 ;;+FrNAa(VZbũK̂o,xp ]0Ls~Qn11ܓo y𩣫M ә `|wY0>O]h3HqLs`nMC! q7BσJBGP9j8PX)ҟ q[?i~n0LX;=}P`ۛ9ix?4hSeH@H ]JAvOM4cH4BBZ\ӧiF1<;XwC Vxeb=f =T  %6']kL}uu%r& ڟ%pe4 ERpq@?_knbKz$2mazо:ߝQ&LQYL,-!A`G1+lTKx)L ߓmlZŋ?]^] T6E \96֋}p(2HˍՙxB6V1Vwu!eQ,%B\ORn:sKaϟ;IӅ[~j[wεǩX<Q9#S_pnr/͆\,ui,Yc oEߕ38PFn\%Zv-&edrUܕ pFHBwؕ*q`փNRv9.쓑TFɅPNT(,kߏ® ՆRⰟî :lJ\> ڛ $~ ;uAy&\Φtlfl-23YeX *vZq%3cĬfu, Ek>ST} ~3k[1|rCNPm uqzs>FcѵFc4hT+홭x ]`EҞ8zQvas- `?T8vDWn|޹6a{tP eeثt1Z>#(mzև݁-dݟjM9z 5|z Z?hmo,w+TB(Z`L.O/6CEP(No/fn}n\`&TceP*W.ƾH7&D!lSivKG O_ۦieZfE{ciS|&:ʦzS{7k>w+!tJ` C{Rޭ|&:ʦ@}Ĺ݀z21NEN>w߉_ZX7n+6UٻDexpcke°X 8W Vv-pUtV?PLvZGXi%஻( }C}D$^/u,-R_eA20ZZTA\7է kgsrC`PH<%L6_|FvJc6e fә,TǔL'm miY{u(1tYQd85OhyқXƅRmWr„hu xϨK@!Rx#ڇ7(љOٟ֨Ţ؟)*HR=Qz*m gs5>t~?GgbZz Rj`xJٟu`oW]Q(,hTбNF noNgb0c[l~ڲw 1f,w!RvA΋Lald-6unV;NdQ\-x)UikU;*eybl eߕJv17y]^]wv˔_t]/vbv|@^tH<N7Qs2Pdiب )_ps aX܈RׇƾŲl3eycZ-_beLMPqV)% ܰJ=jMQ1aL1ϴcN%Z a"0 u=? 
P%]#F "TL٧(5+qMEb5BƸ$8&q K',N̦ۀAʙXl)nNF3գ Oߣ5ʑ4BYxkQ`@l+$nh˽c3z2OJ핔eFHQr +YD].MkbDhVA|ZKVփQ*4tmuYSj",)Z4k-ϵ癉!uJׂdjkϣmK} еuŻЦVrz+{()v 0Rr1̝ d*;=p`d=ɓ+9k4!z;$X #.H{u@]i(4] 2AU(tfd( @a rW&CAg)(Ή"n*D8YH$йxp;d{D ):8N#',b2g T+`|U2V8/JyKe82/U q}X&4@<FQxD0,Њց,Z*oIPL/T~J;Kxz/2@J=Z6'Ph`JM@IVfgEVb)T֚{wTs#Kؓb(mkCWbTWuRȤ@aGwT^b Z2v`+E;{D{\ ;}nؙMWaNwE<^ .T١FBUZmeOX+Yy!a¬ܜb޸ʺrecĬv4u;A9\Ǎ+~1.~Ї,;_ (|ADݐ\YJ=Crp{^$wؐ#iv~]8!m3VPZVbOj&yyzJWG- 7ǛޚdO[=}tļ&ع@G;l6Z3LO1N<9ļ"F=f"yoAB5ܣ?_O;) wN@5(͉o=+(*%E˔[TLIM*$9hw;yCMОg?p OYêrO^>NoQ3ܚB|־qTQq^pD1-eQ v8>/m(m ElJdШit'ؚHS/ ;R#YKD>K[s>2^lK4zko"J}aϩc-E7cs9ߍ5:|X|)S_7[%9c , 9o.΂4)9g<'̿)KWc6qǃ?BAȬ낐9\)#`* _w/VOduA(Ūꤜ}0tn8:lOyߓJZѱh4r)dRIOg.:Ua" ##`Gfa$T $usR |6L쁥\TTwDPdW2;O:󝈋y=5r5\_nlq%8uZo% .ud[o_(oG76 3W?naBF.0҅^8jC'փwt;5E}1t7)I-\P|\:(T܅-&s '_Y.d]P6Gs]X3*N7~"q> ,bԼ`8`ۢ0UQڶR搱Dwlfak!-<M8pݓ[P!BheqV@^VPcGSs*L(Q=CuN'X'](bcg <&q!\ScoFK"iLΙ"9R,bű*&L[MjL%)u6퇝BL)萫4Ib%#;c9c( Xbcsс=<,6N#0"YY%*T7 rZD &p|R ,(W:n{hbc90ʝ`؀ 3E1kŕʝR+@͞ bFZSwKWQI-*NύzE4byl㛑ǩ=vNW+J .;ہc?\-0fAAEi|Ĭ-!!85l=Ū5FQ>6C%\U <sKzY Owh(hZ3.{u3L$REoM^:nDKR|a('`X,N9  7\@5`>uVh8<os}8zE/ҹRs?rVu:AoR+4*wRot4wsNC_}=ާ}=Ww |rSll+GCzƒhB!E ind%nޕB[8.~]9>xc%ʯ",9Z~ 36'Ƿ#>q UZCw鎝s.n2mĒ1M~gDРSĜ`}Wl2Fc7Aq?D,=̨4E` wͺ1L11D"Fĵ3ǘ8‰ZKrI0Ȋa4[E!131&\S|*֙zLֻL1wg`8p?W+l b* 23 B{Mx;@ADe1tH-seKajXi{V h`/'9鲥Hǹ>:Wt ,Du cAzR"$R-we <AYy:0ZG1vКP(ҲL)J!{YGWX$X4bB iY伡&fрyQ X vpmQoɀ Uپn`E3&m_ wL!쇃̐cB@=PhP#6̜tgtLm~7[&iFjB"v=)7}sIg&sE<-۶'Iq;VZ:|UܝeUUD%ro`hRɖU0px DX#%1j$E ͈Қj4[mm@B\\[{-6BAX.da-ajQU'yg}t ۢ:31 [߁ 'GԠH#3XΤPgD |Hm{:VCxD /{([!4Q ($% :LtZC>8 ! ]JtMw E xj&Cȋa7Me^Q6-nd;+q{e1dyk)yª񮷘W⣍>Az_^^L." &#fhNO4|bygmvżzbu#7>40f&6CK`fѶ7ٳhWOd2McmZ,;:E2ʫ#Xvj]d",:nZN}X_>R:~o*O4E2|r w7wSrH7;W!\AolJ/7aQQ:Y,bTw\XVӕ6 '>}y_n}پw(W=iwtww`Tu*&t!ieZ֭{fuCCsSZ79X2QhbNL[JLև|*^F:):Nê?of419OqџB2aW_uΣsB5Es>uhzf0RñkhoI5<5>*9>o,ϟiB"J9 +™) i,XJݴTI-JkɝOxC LeJPFDE!P-$(?e eY k Z:kv̦w߿. iFԘ\l!_\xI&H[) 8zDZ9`Je2`d l`ӀS?sβxe|,|E+j/첪난# 'ܲU5C2U==ZPѡ=D!WB]0='t6Z,7Oͧ{)21B+9lcX`HtyRe>q_G= &Jp]gKŊmVY4aڲh$SQimD)a]`;~q# Q.Pb=Pb;Tە.UʟӟÊh}*C!X8$~q;,(`#ChL/K0]'vlˇ.6?p/Vͯ Ol.p"v!ѕB i]|9uC{t>Zn JZ{@٘MGNhrJ `7V l2`JԄ>a W:>ygDj!Ne[ϩC훤+ .YJwU}cWlʃr;53fI| a.J i'hu!;ͭT]^46t)5s#q|4wef]Tr^728c3[\pLY_Oao#\v@T*u& 0`˝y8g,+\9lO%?A)Fûi8 =+܎~͏fA[f!BOر /'t\qGp)a{1Ǟr= JadK_QֲV0ī"0̈́˖xZPS)=z lL=rѹ)\NQ(ҬP};ѝD NI!"GNHkX}p8'bYύߤ ht2F!{İLbp9>R=bd!, e zgcRh?nc&uK_Fjw&w#BK5 XLS_he,2CB;aop8:Ԋ"\`jaX':弡y0_m"#L##ۀ6,[)`b7f)<ݾO7~3}J(]ŖE~zV.Cwz?k2 o۟\1^xꯓT}} " 7oFq2_,8zWN3pTRADrLťT]fy]|\fk";S[][or+`,օ%yKٶZ[G={俇ۭDmz.lKnŪ" ՗ s|WرEڍtK;~sWP~UﱗǐbYjκ~:p 9D;L/1D` A6H}km,2 ՓTРzFzF&It"g<._!R$ՔR;dg'i_L!T8ᰛ5:57ZCHVTWrBkN7Ĝɽ\hfƁ33 pE!@Fch^) s0y R~61YyҼ! 
b)s0M *Uy\YoJ5k+7<3[g p#j%?K^2۳l8Ws--K8WwRRfհ.ev 6R{K/^z^J^JkFRJܫ=xGBMyJ [S3bk \S3TMuT]WTޱԍT%6xjR_jM+*mm5m0uTUH Gĸ1qHm5R7鎎$ 13ldh|t+|Km[Kڈjc*i![M֦Z%Tkd[7hR8KT[u XXt׍cp 5YXe#Y&@'5gJʹ.ҝJ3[ǚAG<&`ⷛ_ڜ%’^i9f3WX)lFe"UQUͦ]HW1DL2pe2wrb[ /Ka_j^6y ~; y^7et(Ot#Vxir Ǻ`OO|)gKU` @ɞr{evsk`g }P4>={j<0K9e^-,>J̦>cK vLYo>̢1qwOK^ 3ۘevtc Dgi9(ձhg]Jhы29%-0F6Y%%Vizo~7Zq##^&Ev0ɉ=!&4GGU*8-X_ .֬28I*|8麇P2ǒaIw^تMY$3L*"l0yqd4<&k㓖ѥ "xKOڴt/ݲ%P{v0C;FxNtam2MO(]V%ҢѨuLZT5b|2l:fv{%5][#-xJ1Q uhg8#$]njTWy[Ě)h0N :?7c\J\?[q>hD?y>8muy[# jtH?Z:m~l{VY+{3!bkT FAeM=rY\ %>==\5RDZn8ᗤp ڸ'Q֖,F+ rAruʛ  @ OY w@ݻ.H>%;YùHέ#!91ʼnf  !8tf AgکEVĜӝrtKdPX4@0:uքiJ1 @rL%Jm"ł٣7xtCY??a9!^ e_6Ѹ\<%S]DV4"%nh"r]ed&]TctΠ jBtà[X(_|V܈G{9*lVՕ3 P7:P|9C aP:He>ffЭ7*i(ds εQڱ薤҂n*ӴZWu3D)H|$ZX6 )ZQ=@H)Tֈˍd f8b9>kj\R)JqŽPH[٪K@> 2"k )|pYj@i  {Ej*1v\7F dԠ;dERc gT-7m iBr#1uZ lcm .6qHb^ XλU6n!h* ||Dkr`]'1TJTN=YI*rH-K1/Xfxt3FrҳR[6h쒑m˷^x>` virTÑs zyz{Ι > |ŧ< ߊ1hТl @㳙vRxuQY_ug܈B;{x ҰTpX>k4rRf~ށf2 =tzx*+.>xtsS[Gm2SxM$:sj+t*^L6Xv(H|#w_`y꣜=AYj)b%~ f-֝^*]PlGO\ 'WR/^ Њ,[Bk1|ѳsGhAY6ڬlt =ˑeg>G1.e)1qe&3%ȤVX:%"4~ e_bgS}b%fNiA!w]Ad윛uĪϡ 6MTm{xDѮͷumdmA*@79;9S/ mq**E[Jdil,[ƧN4FZ*(]5DZb`4Hym8?^k#襭(||[9TOPZU2*kc Mwcw\+$ƈPw ( pPm -2P߬P`['v`ByВPT:'A[(_6=Ra1ef-/uuo1%8 =LYxuU:*J)(EREďa>. 9uW 3#JT_;nob`}3OVW_$%t c NQm4yp_o[5wo۪m6+T+ȭ\zҪޯ5q]ujP{ѯNӘ.23qsq 1-W1|yM,Ч_b'3x\VU*Td Qq Y@[Du5G NsY2AtƅvEwBd kV;uVogI{ eAN:4U^^@1x llUF}!T5ĊSDN{)u|Q[ysetkk;bӀv5!LpPkUͦr;MȾ)s[hXY֧el,+SALc>MsAГֵEf*TL w:f54k"ejFkozKe?Ab]NBCP;w^C%` * ag]`HTfՁIcz d9lo1C|C|vBqaD!nTa3idoǫZG^0v+߹FG_*m7`T<3{ *ݛ͟$}G xRLr^&w? F@fJfMSm$%}c݉B-m)=r-*.|K(w;odwpC]}'v"|U:\yJ1l \?_}g98:[\UvV >^:0lo6f|L`] =+c$^s>C"^N?7 0֢ =G̡.{]Ë U`(Sp%{Fk{>1Q(1f_jOF-,t$ew=_jj|ȝӫӭ^]l24VŖt|uGmsm.e)1Wzo_yCބ:~]yiן;jͻۅo ˛/~?,fA65\Y ]scȧc߯kf"gx޳1vl$]tO[}ݜR6T933JdS-E'3 B0iKnCu:C?$I6nñ&m ]SNJϭ7ZΫ϶#^;M{o~=5 w^ƒP7owoo~*Ng l1tgxH{=?nlPg*p2AyJj بYnP|hhWf17Hb-'Wt>u 5ٽ8wmKFWa{%q~pa혉> -J%Ul/H(%$h;q7E&$@$ET+LnօIUvja¤&!ysaR۷ې-pbNۇoc{'t#xnaKɔ ^ 4`P8BoZbR1^eB1R% :NPf5PzF/L|]n-f*=9/g*>U[ChQ?$[:>qb?=c]k_Z \/w\i}Kc3ڰ<Iz5Ab =Fx5Q ZPyKr~2pJPs8HʹIQw)&X%x#ꮺkOw}v"ˬm'o=sj%zqK^ ?թia (BWYFm@j(w[UԝKKvGl$]lf\j=7Wrn%=2irHJhjD8O8(M5/Aɉo<X9I7[;uzJ ǽYPYuW@Lw0:0}52!O҄u #y%b_mBF' q3`!uI 8|eQ50PR!` |FvODw)׎d1m}o'=t@yQM5.d9/,h@М L9DJh ͉yYui;Ξ;Q|%j,r"(5J(ץVń.S*HSyHh^j =lsŞ)$H{Z~$˱T ǽRS'Lbb_Ƨr]IO"v ,bEҵ5"x@}QP8h}em,x5?-`{dn7-4kGN"t%4$%5o9HW^Vo:Ne_r9>8'grs<(GqnW#~z)YmH#5\QtSϱ1DT&;G +S, &En pw$JpjqLvQ'MKa$#,M-/rUsccFRp:vu=ʿK7+XO|zԔ!'f˕5%80B"E/r^J0.x0r mpU>^p|fpFůLLžg?q RE?<Ƴ]r|PV(8'eQsI,SFQ0!] J@!%vBq3#A {nU@XukM!lͨ`O?ȖQ T ZFyufԖWoKhjel x*ifXMˣN;}gIn /ݻصk:%yLg0(ZUwY%v{*G`+drQ2IuݖO`h8Ev!(a[N.2)x(F_'߇Zl 't$QۡwR8xknnaHo=h{XҽiI@RBH.RkSG.ؖ)2>%ԼiVpc;ɻpRE/ hNbʱ1X*w|2.%/KBpO/3ErE4싮AU6iݍӚ}{z_m e_}ႨDw`> pĒ oQBppV:v!QZ|pNАQ9R9rJw4*5>GgpЗۏr(Ƭ> 9'cix{~%H #2Ɔcc=VI%|uFvJ•IN,A`>g;XP*"NSx[]637.OŐ3ۮĘMZMAǡN$|3pzHLrxfC35$O̬smP4EO@"%׉B`T*X}g2K * eWKBnV𡷉5 @/fy[ÙGܠ Ֆ7!"UjVU|W~o(å-VqYj2Zf]}卢 H]_}&KΗی"Vxo-@*%޷0ÍkB]7ds_ xlqEX ž`7CCI/(㦘qWxL@$$^؏ Z\xp%Hz{<16|E#&it5 PJ }]|=8)ip\so%n0_PP =2 (4cD'0<7% 6wP "\"|,˸"B q< BXVqf\ =Y-8{9֮zŐsּ~5KeԔ"i:3h#8q®mZLJB9M1SXL)9ET嬤4GeKJ5yޤtȩ_bF(ӅP%թ,K3 !VR6_@D4OC/{c^6eo+عxaI$8gd9*`$&h۳ӹ]p #ح;'m:8b\*! XOuYO%xW :\VpGA?+Mhg.1V(&-Olpu8]ŗE/JyUdg,&'q\MQpJPET@i ^?q4O# ~J+qjqI8ࠟ'𼧈QԏDo{NE ՓX>=OI0".r*"԰лim,E53#w~WGZqoSUS7+֎y QVyyUNm=Wer[+j@Y׭q '[ H^utu7ҫUk*ѫrꭜX^$)bW-`,>ozF?tθiU9jtEM8G ٢l=]{t=Q U!$}A7ß=RUPf#qx{y? 
@)t~ѵG}?mV5j]lɬ!J}pHL&|q6Ո>ۚ辫p3F6ٗE%ȋh\gg&A OjyI=%s9ۜ>m,PMsGs 66F<}OjPξ-]Mv̪=yͻFڙj}?y%rn/T?#*Ft: ;-gBWFmJهm[,_:/$nŖFwޗ+z[[l&I̲.Yg5u?,.d /xZ׳򷏏]^&H2 828H$B'hm$h5Њ`0e!1u߷Bīmu{6]w#oK!<={sZ+,O| @)( :/Nʳl׊B\}sc1 <8/!8c9J''n7!`StʣŠ Nl7BuA168prXv44Es@۰Q`xa dj;S,EnXֻM6{&FRѼL1@2SYVH*%MekRfyqhE WT'r%KNӴSY2IQ%W #R #ANAXw"vVl9T>c-}l~]秿~dJ)1 ߐj(JnW|\>C+ "[l@Y|ݦW3"շEzTEda"*mr.YO:33ބ ZnLg󻵞,ﶫ퇛[|nW߽3qjb)~/tb'|59|nP0?˭F6Uz%2ɖo+`q>wWC=j\uwU~̓ATuFHCB  KMKJw[߼G$܄}O ] M {3xf3IFw7x%3D > 7XpҚ0 iJ=?foQ6@=19dk[46yznjQv=q.C=o$LFƨ!׳di: cy\E%3*53*qcX\4ԤTBJe\țݏC\vsq2?p@96ϫ}2>$Z1{wvP{V'*~fۯtŤzb>3;̠B?mPG3T ҵjPNt-wU yi74awn.1'IX:iNN*` VKsJzCנg{"BcQ崠Y2},`YVf,޵Fb[t$4h/er<b"@UeجMwڮ5#$2`zSY u<.TP@ k Xz5fmxV ӯcQsa:kua;k$k9ۡxY# '%π"4sA5'n~ s~ Su[ g6 xu. #=A U^xyzGTFrMH1 U[ӍvC#c(aqS0c`{Oʮ6U^4#l, !Ň`qx[#4#AMM^KɛJ$TE[;X+E(Z`ja]R* Vgt"kz|kϔdgz?nVQGIQHtT?󥷍$gQhY2Gz -"Fc|8 A ;߳%PWsП|3fˑ^eAbY|^`0#Wnc dT: `P}{Ng fW~Ψr Մ8q_F \dr, *aϩHZaS%“žc pڴbAVx uP IYI,z- вTx+5'BE` Rx?EV2bݲV^TڸZնҖ*Js -U!d!ƲP@2%A&}@mMK\TbU^_>S?x E6wk=ߚEcw%S ~zTz7?Ƶ  {gS*{ JD]J%.^`U”3pnA%7oHly74m,疁NC~U0蛺viǗn2(k .C!=5"!F{:@zo[_W.˛ EaX(fQjvU)m[1_lu$P?W~ b9xhzUӉo|bYV·^g=g #UZ=U2V꾛2=TC۳gJE0~wH(k-.`).,%ݪa㏱}6F)mYKQJ4ҪpV+7ak cmd(u?UKU?rTh}*<8BkY GgE0prN,T]hmkH–\vjѢwFߋ a Q\zUy\ޛq}ws퉉bbHG:WY'j a]圳tpdYZWMy6Ej/9fMRYoV#UJUP.`JV5FYa$HuEl -*IzФ Z~$B`}”X!Py'Jȥ @̓'hW!xlcܣ/^ioDu}n"ҔLB: V"J-!4;T T;k=㻭}wongK!].7u?e'O4LUvCD.,})M7,Zy0?0D*2IW>С؎7w~~a[~ wϟ YL\lM&qus}dvOa~TLvhF?fKݷ&lIOxQ9p]&:eVǨ3TVvn?]wNqjI S?T?* -muwݶ6Oq1z鶕 $~rfo/2rQ`iF A)Jw05T93z.}3S/;Oƥ} Ş}B/_}@$x;ii-bnKPC[;T:K ˉ7'ĕ,UG%ggHm9sEO_=A#nJήI.j/W: ʉv+"jL GeȚ>^տ'vwUjҘO)`H!V߮gCTi9T/|^gz6' y&eS $s |Ͷ-W16a21EͻZFz>,䍛hM>Ֆ5ւeDC9Pc$7*:XeX (Kil-ea†8 ++n[yD ګ~rrgK"5m#D>p0("ٖv:+"S\qiYUJJ\ZS W(\ *#ѵ)u1~~ѿ7p魏ӝ ~~_Gu.hG:8cQv>;o1%)fwvËǃA-O8ޢOhe>8$L^GȞ/q//0 U崼31ֵV )DA[reT/u!عj$pdS>fLMT^t@lUj-9'*jJD@UMZgѴJrvJjwd,n+SR)dQks)LO씦򋜐;nFڶX-Gz%kÖ=rDg#[&Qr ϑttM6=x-ZmLlˑ5u |Dl(Qֱїo]w㳂Qlc1 <%4E ЍG*x ÐF&u젭^CTJo=z%h*yH|hh{ F-z$BA,o,䍛hMAFdiʡw0u:1ox SOwFnޭ y&cSܵkw,ruc:Hn.ͻEz>,䍛hgb@(ewCӄgcֱ068^)h"o"/ +;^)h"YZRڱc7.jIrR'ÕR[Ok NC"DHH,̜JA2f1/5dVy_]b)լGwd*,+EKs qj[~h)h?{W6_aqQQ*hDe39kJ-w5e;(WN ϋu tT[-MLl ,v:kz:5,&Ut%HB ~mãzO \EOlk8fl"e6ֹx1,74s|>,};%N6v QoOz#`AFyk0Nh<:mmě>cou}k7+p^480!1he,S1K85b*8'/`fmi8{a]#|Zj ?,tу?|s;{qЕ%ᶷ+)'p,8B'Ix_%Qyf!HƊQ 10/"*HA0T($R-8O4T>SAS}f^6#Qg%UR}G0X2^A_Ūal\rƉ~Pu22 M:U⳩lq<2z5%gn[ƞO5y~>՚*[?T3&ڑ)ZwJUհ1@ͥ}{\Ӿ+А\Et֯${֍B Z7_;X 4'5f;;Z*4;WQ/*uW.2^窹d+[N9^I_&tBϧ:<`'y\ji ΄NS=ءZ!3k>wXi)}.ФZzB#NKQK)vRopy+Ճ=Y^ˬDBCK,ƌNKXKrr8p|9-=M`j{QWi)*i[R^馥$k馥$8|Rk87żZzZNKZK9sRά-RBKOQ=ء|Bק9ݲjba_z.՜.uZbGof:23 TQ4R11rXF$ H JY% ~ TJ!R$LCj)H(-c%s砢p:L2y 3a|3-;<o#OGÇ-qÅ5U})곹 T}ȳC<6'3OEmYisyRM6mee_'[wg2ێ"|d-x\rf^\lRn 2<,ba52u[+yM 'h<:hG/mrhɨGcIDY1VLƁ=A)ts A;؁Vb7)]ORv<%UqjwVe8_d&Zv.eYF|:43۲q8ܦ@Dy؛d B_oz!T>\+cwr0= ɿYp/٪6B1[DYlz̶ w֒z5o%,)pӖYH&XAOckz)hO=Po<%',Žh2/f(mZ6p8\W7J`^yFe͖o\^O]ZXe\RFQF8 +;T#:NM &Z$"؄:qD]K*(MhA|L<%yfPbeج70ۜvΒaXwV#޾=E(i-t"WMM zK"t'bKf<?fKl0),߶X|~4fym{G3 <=fW\ V!R4$!*P\P*PR^/$gA0 P/u> HhKsHZ`VlyK.&RR2M&ĺ]kZ(hѫh2.`a *"Vf ܔ}sp %cEb77wRir9{n5E[]C]1=)Qvq$zolmMpqo1Ƒ 6ono2_Q\ MU9&{Oz3M'A#+6Opt]mnեVKUF#BgIG+?~]p825 GrOSI;#^rR:8g*[ ݅*@u VNmq\"9#PƱskbnMAQ ԃjjvї4tJ!TyR# jJ /vA&?M5Ff:Cn n%:^ OT] Z!X h tXW"AjDvNnem=WZnE ʫ6qqЫ))%[q9JRe0dXuM](QKe)Zt՞r$\wo.sz}̣ڛeG0"LIH1bRJqbLp82{S*i<)Q"M)ȓd`EuRi"%ZE(-C䳭}?V_vlE(+Om9j-aL@|z=s@-]}C2Flgh=fj5|qcLoKNg%4CcmۛH"DMoXty>PSŊ-w ~]fG{'"V5p \)n飯G>nZyt[}Sܱܿ$r#LF ?э#ySR)qXCX˯ۯnj{X-B .Jtn)/ ߦ}PjqKmXS_[y`v:9B^-! 
5F9OJ j1鍧V{ɰp0J,$G& qLgFqNoGv[+['.߽ґyisQZuםbm)zI-=E`j ^RǓյh)%nZQm i;-j-eMKjM(C%$0Ig)f _@iJ lRIAS5٣q²_|K-OɨƢǾ깅SEp*RԆiT>Ǿn-MK1xSF[R WQKetT]Q=c2NMnbM@Y,H)RM2j,1Z*Pcn@M$f$[2͞ %B1<xk~G3˓m&T!OPಁ%EhQkYr>jBKvF9Qr_HE &qP[$VF:&Fɐ ;VJeXMryjZJs.,޷n8K9HS7p Ͻz@oB0/v2bX~-5U v6e`9H_uM^g!p՗Ku uZޗ@Rp݅jRQ|ŎͿU}")ܒSM;l!# 2r%Dc$bGTG!ކO*B$RیceE:W]v^YN{) [{PpaD0X$Z؄DZJۙ4B<9DĈ' ØTaJd/odbSj[9ADhIh6Ә|nLH)ӯj)I(&I(8}B"tI>aC>R 9]mQJ`Z=6!H_mьLJH8:z8<inM^X#^i4bvڔZ5|9W6t/L8?`gDq۟I#KҭUsg3{j gstI3]K{;^' ff]o$X+ZFɗl|::TTry.T 6JlUp537.H[v7>}=9[Ze#5̥՜9. tku">bvX8jKMLΦc23*(׽'IR_G&2 ǹCP `aejS?jŝYSnkxD-J#t< ާ %9%Q|+}g׷:A4B~!ZiLqyeCqDp.x [U:U(#NtLjMU[P1B~!ZeLmX [U:U(#NgLsNQMK_[# lge{Fq[\g6}rp/yA~_IgѠ%n~4i\7t11o{Z'|l*1C2 ;&oFP,U`M[GutsaHUld"oQۖN`yHhq)4kl%OyDCض-p0KؕaW ;s6Su%xrXڟ,_OZN߄Sgd}arqPi̔Hp܉jω+|jv_ .mXiѥγ8+I7FmEw'`I5kw۔Eݦ,VmR\' i[^.Fف5 @8 }ok{6\.T\G#Hcm(.Tl_iD)tp9f2t(0WVbŕ{Wsv.xpݔ $юOW<{bOԊi>w$0eoCtYL-zɉŹؗoFzJao>0~(@n'X=9,+h㻰ѝW O9Ow1$:!)鶕2S +ʴMVԕ/W0&xݤ5vz\ڣ5{(RKJ6Ը 7O2+@scF@tn2f.,Ӡ lQX^\\]x:N#zjo+[l)6tMXK7ucFН3MRGk+Nj}zFQ>[8 j{&rΛnFh6$ Zϣd  ϭכг0x06l2O6${h%"] >@Ys}Kg!,#\bzנ[E̻HsJCN! y *"e GNHaҢ&?IқgsSBhۼ=XW;V3"(яTT܊?f<'umNXl[ b7yM=>=/M*> Fa!Xϻf3=`}@)d3v0Ozv/3@ut8^BIf hγ `.gӢI8%5y6ۇjlB "5xHm;U.(_gIJ=b}!ø[wbyAyۃ5$C_bq/]rM$&]\"Z^ <@j 'V (F\0U2G"m3GēaȖB2DRKrU}I.]RCPKhrBZL|zc1)u8 v;i2tϟE=l:.{zQ5cIm1"/$@`!DIg:y>|-)A>V^އ/zA8 70n%&L1"l/moEoDY=>QuORB]&ck{u)&BrK}#q)Nݽ.\邻*DVg"-ȈN,b բ1QT:>[ϚT*TR6ui0ץs@H3))\0~ai.Jh UvNO05T   :?To jh4u<}uAU=Г^^$c0/cx:Duua|g&_M{%ncP3>yKD5 ,w?\dfrdo&"yi Nhjөlkou4i20@ hֱ`` v< RL$sx v3r!, .K@ܜzz-D+Qևx.P]nꗍvgP{|\l/[ "k7 d܍'؂"U3Pwt=Xsy3uEcN%Otѧ(N‰.ttXn/9Kili,$~10 _pl)$g+䪭lupRS=A9 w #2@]JDDl(L{X6N,.{H [ Em-y{q 6بm)b\'6>l9q`BmH5kL[PMTgiu Iu Vrʶx|¼ivc)ž^%/QN%i4uPUoV(nr^<,ds/6t,ޞlQ&Ol$^i/jB0Q8G:4xȨV u/3Au,yКrh=jKr_Vb*}C#Ѝ4L%*ac$7], 0q݂ng K{LX+IRbm OFȃʃ# \(*wKr|R"e;bZ׆ua*Qr[Eߑ_OPV Ki i:;Ocǡ-e vE$\Ũe_),?qde]r8-QJc+G) Ȫ:A*zv_9G6*ЬZڞ:sxmQ<{j$]$]7ʲ`E*2.E\{ޙEY)5`Ţ }*M CRIҳnS%l<+dxs 5ܢ,)}iQPe@HkgG[]..ʎFa9l(96@IKqw8Ud}6vfރd~!%J;6?>?SB%J>LxY0^$,x;6iL (%Z‡TL \W}Flϝ/YA`8o&AЛ̀?fZ.d`8wz:܇2 ڟ=cz%#sN7R |c f1 ^$! R &{A&)H&sl<ͭ/S q6[Olj.x/=%K[xuBͭ2l/Jt`=Up5z'58d{</1x>{z5CǤ`$x(FX Hg:y!ho4 p akq+z(gQA&۾Q}E$ 7.ȴ8X4vٮո8fuD Xrp^&sACU_۪#Ic,7#ϑW Lׅ+]pw[SMY" 6TɍH}GEoʢx"O&*Ș;gF{h嘐#C.:cr\ǥ$@M#,1zQ<˲gyVuбXd« %Ui4Xgʣ ~K#Y>л ](y.p))''6UArU_`Nr̿Ș2bG 4PK͉VB Ťk+SVB*H;+!Xp+Y[C2bݡ s mKG.wmH_1n2* vnSAюF3=~Z[V?v-vX,"p1 nMn1vV.;ӂiIW.EL)srdK'i'+EЂ6')4V'kXqPM QE-Hf i]u7XR|Sشh/ߴy_J7:̀f,k*q0[Qr2S K\.~= +"6Yyv~v~&Ph W4{n{` 5mnJN"v*6혻Е[ޭ MfS巜bwe|(3%ϣݞo?y'بeQTl~sꈞH̟߽ż|Òs3ZC~6~XQuqsXήr1hh˃ 4?urjocb.zrdY n\_Œzq}sv %:+QmdioOXo.s_-_ovSd}}{9A*rrtWxܬӍ"{L5)aZ̔ e v1ab^q5efNPMM:Qz"ry)]_,ޫc].N=N@z0{{WEGJï\/KώŚkJ"+?l(WmwϿ'_,.ίV%GcbW-ғXjvQbJ9LtRII,PZ̿Vә#qKדy@!Ps뾗Noh P);V24UnYf)!ղ ~R'(0';U(?/r$EyNjP&I&BsV֌)6̵oTpjk􁜌ՍFu|4oOi!G$&uDzb`\WJ'+ SܤMQ#o[ rk?U)zDZBAA`9Hsh$q9x5!&ۅ2H|n'`4j B `s 4E-Nr0a ɍg >gxi+:u-iFgieX\1 -<Șa::26Cy 2#҂q-xQP>aQ+/:KnF^˃/Dp&V۰uN9IA.9H+V.Hvڊ@}hdgKuC _QH: ,degDk@2Q#0愥*Y@tCUgiu{{++/離,޲B?^M>4IqhuM.Z_/ѭud4!.~j'i_iM>*= y72%,]dr#FT~#}7]e?ў5d%3 mGnj?`ݼyl%!"^:{zۡ`Fy.G驊3uȕR:wl$=2`Y a,j2v>1qD'n뿾/dyT٩E~wMbl˧OHO(f|Mxo-=ȃ'ʛ P 2?E)=Ծ"J-l GBAR;P&>~ODԊ1*5_e |*2+9\W%gW˳}~! 
0:LI8PU%^(X)^2%@Olւ|!)}@A]-t^` =/2wD(+qcHn %^R[pRhDL 3Pq"#ZpD"bi(hietsmt;'- (u*)L~vsSZ6SA7-AS[֮6-[֖k!Pު^+v,em8ԩ %H%贎^ 稘d;EX{o,+6 *4>Kҝ-];!s" tN͗Hi^AXr[\9 P]sSu6P^XǸ`M!ZqJyHaDMpǘq4ϱb/0:6HXq^ 5o+eEnjL{倾;2D'i2tx^֝c $]uq[X%-X.b,sXߝ,>_ކNpOW̽ nn|1z?ako_"Km!GxD4L?mF2^,ak9KG>Ӗ";@[$I>U}"QUn0q+.B1m8xa[~2.aM-?Y;lj<^ss!bMRt֓Ǩ~CVJD+3ທ ϻ2;ϗ0[5M.BdZ35PeX- *RGtczeZ[Pཱུ@`TkZl>C VrQәV]2Zmң j;i)" oUmYG)ަAU ԉgزA #sܝgzdֆа74:3p0(xI7'7,^;3`>c!ʐp&s8ǃuVL$SJ Sןا?dn7I{<$:#ʊNHYWZ!9j2\8jOhArJX k9bAwuR):MJa (01rUYŽZiB ˄SX̕gڝ O, [Ņ4Ἴ9//ml'|[asxUdK>? <X&`,yw"CئXqT| :" )+s1IS=2^J֊Jߊg&f sE32 'Eu6` .3͌PZ] sǕ+ԖU9}t)W͖cd9-ۋ꓄ ` @RRB%TŇ43~W DRJǭr2Tw+y/4vi}󴮗>KrO'+LsFxDfoX*EpQ@jZ2tC*JF"~)xRO1\:Z!"C?͉ZЙRzPxg(ET9Y3O\j*VXluh]#P6͚o^dy_J0\+8JAs.j|gxBY |#qfg٤Dګc_sG!0 NA5H?8Y(\Q^{gYD4:D7;58T3[*4Bu >FP%D+<؄jSE.FQq9}Pm[xz<IM-%tK;tc 5 ^_CMJqWb$+*TT. )ts;\źCs}\ 7{V)c{2Y b|ƃ,J .!5 Ea\\} i-޸||)K+H `*8rt:殴_ڞཉL*u_H2%% 1mR; p<.Ep=Lr1 X!^%pz#Dr ,@Kn}8\5Eͤ;K AQRcO=!)CqGC~zՎnGN^ i 94q!mI0n/7'N[n//0k#z FOr:MC@aWiݺ'{p-o8'`K ftޜ{⢌l%+u`R& !_p]礔M *( n3}7J/aˊC9 t@Ys6:^ 4øQ&Ӷnna/0Z0ٹOqpr9)Dc V{8Ƶt7׋a'zV/ИW%x{|U2G`(?zjs>ƀ˓Gr<ϳXlV/2Y)G6+#Of5AVxH <~ X ?k/M 7oTG{,%@?Z5?<Whp_Og_ժyË_dO+X_ l<׳o^m>*r7xy7`+LB?"7Zv(sfmvg O&"2ĚhId:]̸|s[=aT0j:'tl.~ T(Ϫ;@bA.lC.å ]= gmD9Bj/G٧DO GKq;]dV >0ys,k8V*&;Jh!ȏEQ;ֹGJ!]?$z՗ey4zRE<]p^ms wR:ʢ#^M$1QAc&N:NBHQuŮ(7<;NfVl.HW4b%8%ěhH xEWmJwF4@~κbщL%#IRT Df'-L\s`L͞ PO<{X&PpN(<\FjS3$̓ =Ic?pVI.CjϥphmHIs\Ato8 ~\{gipRXc6KI;#f3x251_1a4}dOԢ_59T&y> Uj9NzGz9߯6=Și[":qQ&JC HTdeLUCHނ:+ f$iBHd_IJq)S Q$bH"jQݔa4}/mj0$f5bǩg2J18 k (O+/aV_> raBkƃ$^n%Jv8o ߿ (g2;913#JUvm^kܚ^P'?(!܍LӦ\ l/_* u"~6)|hvtfzDG.20;Vޑ$9I?}B YgM#]4%AcJysR՞Uvz+}oԘ*9X\`+#5\4knKtc8;<\oo El#aC8`̢IY}.֊u \1>1rv3p&2i.pJȣpsTRi>w oU$ȫqz$0]D559GpW]u[0d ށ {>߅a FmSi0`8q|kKM@ X9dY40: LTd.2+Y!'BYJ"I&v>%Y2 q^H&&~:ĝ)I˦C 0UbN8JJtڿ4;z-{>t%{U mJ1ޅS8AMRq+cuX͝i[\fŴ1RC`zE+A 8ҥyڿ41BўfӜY}5kzF3/#L2T>  l\kg]-G^XeN \#*발2Z H: AoэIx><{ɘJ-6k (BuҸ<>!**Z{ip Ёp[+Uu_8ifY/YnA-s8 V^A >L3% Zmq>P=J,0H<=y'h~}7[bU}In/$b XBf0%fNp}\q5Oq? !<du#R{3,ǃ?Z6A֓i(3/“~;,C#601M'gYnyq5ΫYΗ!Dm&_ 6l/?5>yuxk&àiђ{IL%DguT?ZhMfE9\1toxS*đMʡ$ !a{W2Ǡ y2oRFUs%H&"DQ7χpBȱ86' UfP{wnp?|mi RĔLAEt" Ҭm2>-n:+2y04xeRr t$PL1' a@)'5YGkl/I[0:2ϳbDWf%g8Ra3{҇\[ w3ց oo<5d0VF; 3AC$Gi-'/BEL:sWn#t{MmtXug-2uek똺6&R` k8{^ܧh2ܩ+FmL̿>i0—cGjKh#,Άu8Z:5?]L'q=zg5"mF*y)y\kt֚yc0zζߏ[Xe'7/PPϵ J(RP$ ?-Dl;CaorɨH%e@_GY:V H?z6|{gΖBN3{u1"{EFEbOR@! 5z[ uY>'ϣ>?;B_+laVש(,}V Q#Oc~{i_[LU<\Cnm%g3|QȂSf(Gz+(p~W"kb16 VSt R'= %&f&,M5|gn g-zIV@h:9kGBfۛX8 B~9 -dB.%|1IK"gjXQ(X)Ě={&zvT?&"ki2W7 K6m|dNbXSd$\ɀpJȣcoXW=;I)/W-)p&s%$#?hkm=\􇊬ΙDnLD *Hf?Fd)8lm{0zr݃7S\OO"KYsF=e /'>3ܭ_}Pw锖q\IŸʄS,Zzchm8_ˀ ]lAK +er01ri3Z91`x%" \4K/յ՚AEY/s <vrj{v}aS@Lh eNkƂd핬=d=Pi@.b=|Ku'y6gU6e}쇡;dbO7^}o'.RoߗV'zWX4(Q} J~_i/V:9uу8?i#T<~ÃJ/i90ߏնJٮx_>p9i ?H*ZE>?~ySZN?Xh$e5d LO.Ƿ9^p<=5:1K 9"yѯV&?O'W׹+}hE]QZAi)?~\rZJ3r8|.3TڇPkMU;\Y84wg/nkrsqMIur95gq~pxz}?Ϯ?Mˏ?Iş]ӓ-rKXhK\S0@;# v)ۖ1峢Wd{*(u^|0_&s5H۹.i酩,)bvy\4 h_bz? 
Gw61(8݄j1λ @|qMbN< >xyg\^T(>Z!Ojgza,?~x(|?GJl&zVc]2Ƽ!BK_~ﮇҰb=E#PF+T+R1/z/9pީYVẢ~ɳI?/w$) cz&:1j?Έz3&IO8ɇ͇^=ihg?|~.Ng [@I/vǸnZqb m=o)ru<6 ;ݪ \Yጴ('}I 퍵JDgsY(3eS<Ն:̦(5&Lֵ6Ru \|x'z|ދo/oۄ(:\1Pk(WFy_@b_眹 (=/ @u(IܲoQFGlȾ#gW& NmE,'n:z5{-Q ^w)x5݊~z>]>겻{(Z׽";q﫚sCvԏ.R8<3ϝKL0X\Nϯɕ*If=c^"jTuW.Ǝ;t~~`6[1ۛrLEb)l0H$ۣr6 >GA.5^ _MNq ݼ{ Zɑy뮟ysOz^L<w1_kt/̑jk($w,-dp!Yq,p98S$+щG{iLRؑz?8nV\ Bx$Y)-)'Gϼlc҂Bgb_l d͊W`$#+ȨrIJVj%j3Ӳr7;RnT+i ^n6gzĆ-n+YfCZ2X2OP(S9)&AKŒJ%~j%r7b礕cZG:QB='D*[fqO7F4CJtz쭶W`]i;o$I`TP(B e;zMSj$e}Ҙ!k |9,]IB,Qv&<#ÕsD+v!*A.6@ c(P!JdaSP'o0?qpܡ!yo,E>o:Kn}Rf]me*!fYoPT1Z1 1ÀZPfIlYukIfSˡXYRƃ}qlOT; c{R]'sǼRf!֛ɦJWk;9Y>isCZ=%ӼjsXhMhl %Ӡ}YK~X ̇#z|g٦TPLd緟cM`**P"'h6Ŏѻm_] Q[[NoRQLRg;홷Ѳxh,7qMۻȅ^GYkDVF`b-3*0dgIQUw=CR*)g̐G,Q >y=rA0Vţ?Ruf7 MԻTJz)[Ii9Q$E!S, JsFYb1<Ęq>$K,3smQL '=TFlP6X=j#1|29##D壔V#2׹EɽVǓ9Z $^rݼ$.kc0e"bDK;P8e͞b @F9#ՓPE3Q'eCW/,>7L,,) ĴȺrhjE{Jd1y&׺n][C<]lӴz:YA{"KRJ$zFY#(D28ŋŅeQ3]CS'^8<޵<p|h;?wIc=֩ヾrsX6`efrA:QKUo:'(k/(~Ӊ;ߖ$N  k)\qFy%`;kwVL <ǁ;6;jNȜJ%R'bU-M 6u",y?ىZRTgj>^P*q NSuT?RR =F vÖ zҽ^UBo)>~JnKYJşx*@ TC5+-'C˞R}~K~Ru9IyK)UOJ[^mա È :A@MM>wk"C EA!"P%\6 s5l0!p5l_C+ؗa(|j؆3[5lpRbj؆r\zj؆Z?L 좶iaȥvJaja]aIkm40敢EP%yBLd&P 0gz9_!4qRyj5p6JC`FuUŪ)Z6ݢYZh5N唶PU+:>KDi<"MhP_G`-ٰG'$N/hNQ Dg9Hj̔sѢ_B:"M錊~\!%$J-&6 J{AG"T \L/'| `N !އdFF䢍Ff\XkljJ >{z †@lj`A"w!*Ms&57NVpipS1,"H#TPbW5bbXkYet2Tiq, ]\~pՏG8ڍ*@)HS'P\pk3pg_3$t*" 0|,^NmcĘ~:al* G}IXe9mpڃBPk =Ônt1H#胅0|W CZ#1 ޚ'6뽄h_]\}::83T1Ax1gP55xV 'B^Sg]1U38d"3PQ\I"d)LY3b>Yq>]_ {Cȋ =)1(B& (rɱ)NEsb~D3HtC_eyv9s\@ţuG6VoOT,?Q:`?!J=A;2;_]y,@LAҦ8Af#3@7{ 0W֯a@Q#MӔl R&ec+`ʼnĬ56gtB /W׊ ROA& kטLSmAϷJx<2@)y>\~gn DXOVk-Z5l^ǽ/7&Ñe?'Qt $ qPظ M h^BgSF݇8隄bn|~ OcF,z eĠ LS0yLvy32qR:4933O!#JUFS>-_lD ynn}a2<4"$A1JvMVmahe}0RB㾒@ޞQ;& 3|Hx7&Ȳ$kd: cy7VDNn_+9 0(}`m&=P%٠!^ 7vPk&9 5줿xI> uS[t<|Π0Qkw3B9EDʴ) XCJYVFYX5*U` ߨT$jT=UE*0@ ޴nJ%McۛIRQWRR:MXRy<ˇn¢9n9׆f-;`6LS Ug>{ P `Ǝ ‚}~**)͑RR\T>[m+1崔7BK(9դUK_RYNK d/tg+T+,/\KUI-U="$+\Z2<4H9 i3,>t pvݵw}ug"t,O*Q_-7ZNU x j$%vYOOX#@%5 c uخ*,?FqN4cgiRZ5',?-(<"&JQ%=B,zQ]?0,S8`pI!#S A(F&ב?2çUQ8yv[9M'PND [Tr ¸/[ \Jk#FqKRy ;r5WQEq|Ed(pl.!W Ԓԅ-oH=uy\[JCPƱz*̝ں$|pf_~Ͼ~nݬj%ie[DXL4\H%xqԾk3$Tfqes JOI' D⁼+ Ě[ F#i*wS`SqmV_-t02c 1hh082磓@%638q& 8hFqi}*jKтܺuX )y.i*jdQ, \;ԁ l`l-!6r!y[cuxziB5?ɥ^8/.5NX,녭 _ə_n9=&`y3zvzɉWLc3ˇx3~҂K1(iRD7]%Fo!Du#Hq_(#'uflj-4d\+i#L+ ]0Vn$F`}5^3`Sp_z޵JmzD`%KjNAp|!M}ҽѹn2thkj (jO0Cx^{Jv5,օ_m^b +Y01ϹL>ަo|cY)Ҟ,vIbT~3@zFb~_s|5|s@8y'rc{,0dq*>Kz>0?܈3Yè+/!T!W>k ~d!#dz6fOr%O;{Jk =ӨB;a94v5-SfMAN( O,7 Jt2\aw5,Jn 9Dǣ=]'-Q}9iϸ=zJw¦~vz:8^ِ'~I',*䆳%ԒǕw_ͷ>/?qrw;\K@JYjB]$;4+Q Se&MxM;P'!^tSMBGLЦ6v[cMg\=+ڣ־ܳ*i[VgO[ShXg^ܬ_t>YZ`(b^_@ } l'9rߚjTQ]`{)ylQҝ`7g?D8_L01Ug2"̓T-.3pG7^2SfwU~tT6TnyTj3viCʭ/VcXZ)N6:7+wgˉJʧL)EDd?);vg?Y dy!˃?ǫ?vcIyt0$d&B.(d 1'g2bH.ȇsyP?[Z38(˄ i\ET&(U)_CVeT;xgWH#nMh\Ek{q~({A)թJmU%ֽ[ `e[7WѭuJ("~t/WnBQk ^fǸɫԭ S^TRM$}ZJt9-%zy -D*ՄjKr;s|`C^hi Q7O| =X -%Vigf b3dPF. BDB qO'CEcs=5U {wpEzG>鮀tK QxeR 5 =\ %_"P[`]Iޘ>3E m}n-0KlbXڹXVJgJD>}V9fFƸ0:YzrIgMjWtòHoyFOj9:qw=~hV@E?jwFp,ߪXbogÎ%XIlVЋc~HMK䰻g zX,UO}k?~U5k%%E&BhKk,-xg{6f&DT3 Eu)d7UL)`V}9?_|_o<Mvˍ˅7͜ˇԷg6i4 3X\ܯkpj2&j#p 6;EΪԣ8p144n4)_=P9\♂9(9!FFuͱʍKVxk 1- odmLxŚTdhɠb.:CFpz\jx>޻7p@^S*hK([ZBw`[  ZR*1nb+3hp|kx I< :[|WSM9DlBntj쵟?g{N^Dbz=gqJwNQ=wΔ~7 9xe׆ן܆? /րclt}l¾ӓq0!+h*AY풦 5GG }F,c|{&`I>^2I[{Wkwt"˻#ˍjjLۧ}W7r8r;#&cڏ5RT2N123q3kZɖ1֓aƩ7lecy+3 d;2N XN[c OeH^ʦ 'Ș4JtdM # ;;Oͪ>u}IW 'g1 WJ_Y\w/٣%6G;KL\Xm?nZ}Ztty*Ώ߾BY?o.;TkbO7\nz}.n5_j@βT=mo}#vT͆.2)\2u\:ic@ӡ \s `|W8d1FR+FQ [ڿ s[]wqlk{?Qv|y1^"sҝg4"lbF✍Y7nN`|gkEcsbWV,E6Ez*t*ڡ5Fr4Tzj_ﶙ6aU:&\ J] uùıjN^JUK|u@57qMt\֟,WoJ ~^ ly02P 4Jv+o濿`7;m)sۙS>67񷏏zW7mc'gG3^gQ]! 
brSA DD>$*3DӍ5fe#ܕWˆ77zejSgR炣՛G\\^ק'gN )w}$+2B$ 2g`n,lslц+,QJqֈFU@θޤ=훥.|'vF]k4++Dk[B߮ >bedk;,ەU,Ѭ7wgISFdꧏ?Fɬ79\L=Q\9J5xqY`^"\WC^F[2He[ {Ī5o _f=WzJ^o%~L[\ I1OOLD]zØ+C?'skqsHɴ`ד,A%% H|sTp)Sǂ Oz@֮8J'ݬE儕8όD֑dƄֈ g75FaMRu/U𷬍34kǧlu@q_ 3  t8+Fc >XoJ64&,BiB=4 v޷A.5㭱6@VF!R(/yiZP,oͅ2뵜=d浜}r"]180Fަ!ʍ7lL^\*DCxٞ=K@N7u6t!{?FNU*DJ Bܝ,MGxKKiu#Ѩfo{%YuA v+uǶUѿNNOc c0N8/5q8mܛ3B!j_5}eϠ̰ K{ލA9@,Zh z‚7R#0H#$Ya9RQĠ' B%,aLRxnj%QK1*&5_'sFc41R#e]ɮhv,>]̡16\m(6Y3եL3.$j@\|ss(,r/um0Bvp{]Ԉ3nEaH=oep\zڸ1@rjh1AT(yaAMlpzzK!ݫ, A|޿{SiΉ`8(wtmVIMÙx$?i)2QTsi"<[`⨔@,^P7!4&ht"Si_Z~V0|mp+Lq{s1 98í1%27 #gryθAQ1HmtzADK:Ly)b[!lZ>A4J(dN0O(Fi઄i8iaШ tVա(& XF6LC !T&<='rbVeJ :Nn]Qqw58{vueuŨM7w?j1kDuJΣ֒u{9j]o 1Hmj稽huAtiqpqϕKQӻrTſ/ק>TWS컨󍻩~ }X< t^ŏb4tyu0>5Tfb#_n <"\]_$"eΈT6,箭pQFw y&ZeS(~PtR"j11ox^I[}FdwB^ؔ# :L{nN7J>wklV_݆n6.p MwtAcz VUDuO/?y`oV31( kֹߍkbs@/<(VΑτJZ{eJ !DV3u،_߿,0m4+xQ?~;7bunfJq5 ì13 Y7nr!{aHΣ#WTѲ՛|U4`r$n?e]>8^Zn4xhl/ׅE}p)e+ {ڋ S!79PN8ukuNHP6)ͥB}oǫ!Gð=͜m ZYWcvV֠U+k7*6r7h=R{^mtj4 $11=~ ldDȓY[XkߌM5Xt?3ϣB{"B ƈ{_z?E\Uӫ$-]0bsǩж涍aLb1mz#$l`A")=mL\1Ղ2$nFg! 1S7=bH`Tr2~l 2 М _XY7}Q0^cb:ɦVP!ĸIC5 LN mh&qpAr .fC"NH|:62VVÓiKq4z*726 GQreS-ܔc JF]F({0}`p:@ZJ?]ccu=3&۽RsR1c5T^i R ݸKI,*"V@ըsK6 è#DV{>X-/JH7x5H#[uΣ<|?x/~cԂà5 3%&[ccӚ@&CX 7*W{ލK8Z rL%mWҩɼ[}`wB^VT#ۡFvj11ox="N̻פdz!,䅛6ĄS=?DI󆭔tt  ` :*eKME'gW"\.Gue@{qmHU>|g۟::qd{^wemI0;SDY#൵2v3/r   )$裾̬*+k(9;S*uU]͇bz60O)agF[3XٌEl^q0QR_/d'D抲չkH$cMqD$$3ΤU|%dͳyZs{Y^BGKiW Ua.K)m+Ni若/:YT"jvw若K_\wv{euωfX۹+j %Jlun[ D"hڸ:f!EuK̻{*nwAîh}8=PxZ[vgMW?v)Nۣ݀8,+ Nwn i˃ [M eSVϽ4,_iGdD|۰V eWo*𭈬x:WuOE-7mϠ.;>j'Tb0Q߁WUe%V3MA[ 8dQ(Ss-;%Jra,x*ԇw[M| z.qɑmفgw#;ky: F-9/mBo6"͖kwś>rX/F)f۱{+eTn_v=~o.~̹,y'K:_]e}[D<&r@=vJ5WSil%rd% ^;<%(T owӅ@Y,ް(6BFD!1N棔4 ]2`:mZūgHiuݩT^y0)}bFѯnŷ-6RVRAQHc~=j.kMR-JhKEKG!.-TW]mKiЪN%P|76|mvLߨQ`?5O PVwd^׳nB x F\ZԚw յ&%ۙ.c\M'iqeՙ` Ԭqlyf.0Rgq%ƽC͉a:2f!` QSٝX"Cv':)PAqɆ"Ǩ%N(* (NW1f'.Je ,fEKeCUuN86E/FDMKmuA`I3qHJYG@Dv0:.DIfJ^O%p Kfb]/_rN?*lX$^/-ȊTT?u ۯw?~sr3T}eYp%+B?ғ_<+nt]}2R=[gu3LV+ p 7Ek`'o,f]-Mّc󇮨R3kݶ^Sì*sUAb$9{@95;Qa~Namx1ew=^8)3?savSKP̾\ddS!ЀY*G4x PMTE5Y>r(o&7yXC77ٱJq.0cH?x7 U[L7:im9vCX7>HbC_(.-azll+FIsl#1<6j's[H\.Z)_%>'pT*Kp:Bx RF:,k& aɫ'hB$eTJ<31);VQ&㞞`NեWF/:;Y] []yXǪ? 
2y{(v~ͪ2jmQ41> g }f !-@ ӹ@e[#UIk%1-H'aWBBV/кG],V MZi|V -u -]u[\qq/$O)IZF]5ҀT|5ܳ*DZY[~tnor7Ps)Ԃd^ 6h-DRyq5)8N&$C"ŗDᣰ \ l{[d~0;C j-x(]gc:Kiy4xx%` ^Nc,=FN)Zor/mAM@cKf|h!KM, ɜC@7=gkg}%z2YwpNǓ/efh.xss5d,l$ Ak~$|@hypW0~2!Ɠ˴@ӡp5g8'6+NPJ$KA펀@gnL9~Nz'ã,F=ATTԒ8Xk2Khf:4l )=!cg@b03UD0[CrK)1 ub;z0@ u<:Vh^VfǕ%+g(z>;tv Ibu}]v7FCeg@jݷUa,(G#.y>44&z'3GefX$iG^ЙRZiEɼ8AhHJУgXo,ԀF͠}x5tO2D1p0{M9MLQq`Ė^QrzP3 x" `" }1iq[jR89D^Ơ;Z=;cw-}FBfc&%uJyA0*0`C0X=Dr`/OpOZZ%s\$}P c  uՇlGcT:x뉽!DP3JbYoK +K`)`}wz#0c:3Ʋ!xLB-7^߆!kv8AKH5qYT6 Z?|L7ܽVtzޟO]!fV8ā{rW0A%9LM9/@fqe($(rV ?h2sTU_eZFaVHl!{cY?ل<gW| zV4-_W> E%^A M Xf]"Ve<̎KmM6`Ȅap@dIjzAs3\FGd6(aJGwx% UQ%{2k=+6UnMK}qŇatB$Z2QX$ +~mht2yyjɭbSfBao-6$;D;A7nOY#4mۍK i<'^}v$ 9O< v{+ZlZ6,Nph@X1?e!3TLzbWߪ7Zxo(:F =`)42נkx9v|3`y9j̍R{#5vj*L)P)Agv2"zEH=aÈ2_#KWM8Gl+<%3tꫀ:JY3.DW0[w=D' Zs-T%o$GCF˫;{qL.*5F}-wB8/| H)Pz-$=Ls q Yi-Ghq3/ A.ʑ8nGo0juSg\Js^HՀ{O|s=%Jl"F &杝Qv٭sdOٲ̨E4ĩpXƖ";<=|x~uB&\ meM $ZQ1S9~մ+:CБO"|1{y#0( IHRRIQm/.(0īa8h#;SCqC\h8P*> NAh):8)WS6f)aW:j?%KٕXo4ٴY8ǫ֏+ml`(MRo (@㣇w;eZ]csg|Ƹ'dAPx)(EtE(b){@ Ggf17T'x&w0P ={7K{AW?4jΔaws,;:^2Z@J=/i0QiJ!F͟ڙ&@ߗ4$nB4'r2'2a~ѺJ+IH0jδ)={fYmtqv1]Vi<1w&+IPt>k?3M} sr1; ro U>$/+D3%%^1w4j02լJTj)XOXSC &%J{X9>YOQ;lC) m e6Wv$QoKǕ\&yqZT]Ӏ6|?(\iM,S^"b}"JƾgUU'Xx MJ1egh/}Luc`wDUnzzbRQoij6Ӝ=.dV=$dTYicU{pE-;d=m0#9&X=.#t7/z-<X۲3#:euUJ|@YN{YI*8YW}#z J*0èՃ;SOat+J%4}U4&FOM :qS60mw=H-Wtҥ]PH!K<\}UƟ8h;Cl7n2.E>3?\tTvN:(Ay%.R);F}kymU3*{[o-2Ck&K+'KUU-9+ɕ:l]P+&Riʸ/%Fӓ㺊|M^jT}y $F6-; O6o$$NI&xCT}Rv#؞YLS dh 4}$j*Z@3 C#}l2@yADWtH^p?,~2zՓbJQ&~yyebdցLΦ:JT&<@;Rdh:c}x֫6#ֈ>:ENx.q&=jCu;#hC ~ 61:ry%!1I'+dIi`RІ 3j>/PCXyreVAhYрFzt$wёJmDm?| 5LCh洤+q}bՆ =BmG(xC})R'v=u֓@EF/'=Mx^iYhq}pÓt7FيڅR}{"?tiù8ů5a$ErW'E'+vKz|?r=zwn>^/tZ꿾|L2SZ/嫮RN6Svgo`vAw 9P5.e+ˁa~Onv{jyw!zJ}aǚG[z6<7l8=rkzzl<d?/nrSS;ro:P/L!݌C gZR)>嫢'̏TUFkMwz.wِ8}j;}'XG6tϟr9>cY?q嗋u~%wkr>m<l׬6{$^яY~JaӓmF%{T/EZ|g/YES/i1Ο|jsq?W6?fEt 6[)k'}ڜBO$LF'( fhELTy+s`蔀h1Ujmy jVk }_^nV},О i.&'|JgbrY\s>B%ʢSRd ƖFnpa3;%8*Z1 :X'DȎ8$ 4%I' ]Y ؈0UAjֽ.#E(C<oWhK~_@z+7P! #*XՉ9LXF@_|;`w; N7o$x?{AB0,; %4J9!jB>&#yr6уQ .wVV\GsFom*X,NisB,pr>/WhSdͼJ2Ki 4fz$2q(_2ÏO>h7g-] Z|}D@GZ !7FᢇS?"jzvUi]qḀ뛗Ӊ:Y2=`p#/x4uҊd*}3e˜,} HAY{CQdK#/||PTRlo趡:Z4c]=fKU(b P$Ed篈!ʢgg畎`9I=8 uFRgGȺl<|op+|C>?^Ų-`Lc3Araɭ[m},ld-ۛNqH/OUHR<0*L b|IH318-%NL ZkѤ4$TL]Ȫd ϧup=ɦbI/zRAI%ϫE:KoCj'ڗ2c/!P 4پ/ s>ȵ RO-DH("tL) V<쫉iEx[5ުL_;~GLt݃Hȹ@ wVX&Z"M8jު\D}u[QFQծrW"N"iA  P2HJ+Φh@{>Z~ǻ`00םJE䵎1UY*)ɉWϾ7^ e |Bz8z$5q&OngHTQZKdؤ5ep`CoN`} x5"a^C6*+Ey,)JȂz/bs*1\f@^JJ< " d=X ;'w) wyCP%h@Koe4n9ai"MLh@Id_$r!'ܯR+\.G('%#RZ:iH7VP:?F!ZD]/T!km&r>^U(΋ȉ)cN%jU3`£͏|tȬTjAX-GRI^Nɸ`I66Qyncu`7d"blÙQSqwFy2`Z]N̆iτd9>r ?+S6l`wTû<2)EfZȷڱz&W.uYA(:fcMByMkQ3sس5[Fkayj[T^S48a%OLf x~)QΦrTk=QI=06>fePLzq!ROA-‡B-#~Q$[@>ʾl,7v7PG p.4XDEΰh_:Rz*@NB#,2pGॳHxDȑk,@I}ZfׄnkasuR'?NR:*j=f?RW=Lϭ;a۵΋y)ZlFY'+)mOcw ~|uRtDJEU+^ W Y8Ʒ\Qn+yΘw۬?0kqy5˘w:2^OBg%؈:'6>x_9/k!tHVÛl,s1X2͠~{V$4ٴ]-v>]IpPMdOhybRwB:4xo'TVg%k$r4#eD.2DE`e!^FvsfĮ΢YIl5w3d^Քo$G/DKiOS==9\Vm=? x͒@I6qZVmdyZ/Y=e?^%v$JkCx#]GfWMLYmE^t$zzЀ8_|{J-?׫VQ؆,"d-+t0m]; :ܑ 7}}|v4xc!6EztfWtJ(آ%wBܪD!6m"d?JokoVƾ~='MJٝmì7Ӗ^tT~RqsJY;kvb^tNm vBJMO5nJI״];}~utT~RyCf#9T~RPUJZJu?h;@EwğB>JgLn?Ҹq w~ɍ+OP<*+hCKm.?~zuߝɱP,*)3yeE(!dϮ,Pq[~n%wřR<1*ocQ99[%~}ߙ;̖q!h}gvN| OĕWs1@hU+=Nu'Ǧa+4qW{ГV;+ uR³'57'}!Mw6]*`2V9mt30>Vd.uI1WCm8mӷdRSH?4~dڻ@RD%xP3ѹ]Oٞ Y+ԋi[-&ͻ޽ĄbGsr4_g߼4to;nn/1B.^e){AUv:w[&-%^@ Nfz~jzPmym~5|>Ӻ@1x|1D>S`tM= XKu1_lW/m*P^VɈ*@ LT!BCF+w_]?-ol\Qy|yU#kv;{8hw8 Y!~(5;iJ[@B*)C7_BR>?):pF+bV1ٱmxȰXm޾9?/ ͙cc)h"fm7T2X6Tsiw8y~^.:1yH&%x^Ex.DE?~|&|E6vNr:@)JWv";5u%A8\ݴ>g`쐔! 
1v~!ECa=ӋٗGtͼ:qZ:>u煮XBVfN߳ lQ/#:?&IJ lP'Z26bjYMgYڎYø|v@ '.Y2EH6{&:$LbҳdrE+>u%{LBq"\=»"v4AM$g+Ekb˟vt?)&xe—F8֘X[icC1 ynErVVuF4cn>#j{ z?w(W Hj,հQDBWe⨣l@'v%$Б\48mBRhǦӐDJ>*v 7ޢ;5\~e6e^ I|)=OWAFd?mɱݫI|9;ؕ(T!d/xm,J9`nejj8v!-"g&%/|!CRSp>FO'Eq:9i~\m~O.yGO⸫P*F _٩m}&6!7%Gy`pUK*l]R,>3FRF8U$b=Χ]yd/N*҉e'W&`gN@m.Ƨ$oa4ٷʱ#F.ˋʯ:kQMeBu-do}yc”=;q@],:Kѳ^C+Nyyﮯj}eY6$º#GrCmahcJT'[CJ~e'5LG߈af1#Ϣ* \9¬*; 7n oo݉fY{Zoc[fU4<4 V+RcPe!o%Q% ixetϕ4H/* ixo7/) ?va`QqKVr]5˛W;˼!@(?n[5ƈf߸wYF#7ڮǸߛ\bTd3бYa/Jd}ZCCgz6YNfʹ᯼<`Vv Vg,p0T-г=9D`E=r`Sͪ ]WfȩC%3(m-4 p|A&_Tڳprw/SMBs5ՙ#rT"$bp|vzݧj凚wș? KE5n•Ð0 CMJ:fh^CpO(䑲ܲ48Uff-p_E$ #)sʢZ&Qu)0Բ%z{~>X_votozcNq~V4nCmOQ̲C >G(r!X<`Hx/& wȩwn%$7w~vgld2k=|wsGnO:KSquOfiK'Њt1}fj6>~s 'ZLy|?}pSNcfJfM5]͙Ekz͙n,$p\X]H^XWiQ S8yQ9Yp~i(㪔))&7 9![9TCݨq$S{R̶"N(M֌RO+40eECխB6P!yռIAT΂+nXlp%ԆA5_fijɭ ?'!@k<[h SݕCBр@A瓳+w) O0ΎƳN1_YRv>}0qRpֹ"Wy\3[/:g?krYӒrͲFj.Mjz -W1_:Մ̻e3ڰWnY6чM6V{n21wxXSݲnmX+7m }d7Ukw;ן;Q:- cߕwvxH"V+evKpX^mh+}{勰Ɠ/a^.)̀,OgBŌpxYE žQ`ȏQ}N5e 7燀b4<7_×>F:Ք6NެY8#Q-s#YiG4= l eB(EJ R Lj) *./++&_QiVύeB*y|}yGhxzY]z8??nW<'|XތTNdi(?Cf-423ϛMD-/>٠1loLlQKoܟf(E"~xpB㾱/WfD>T& M$x$~< |VR3}x-]}2u%%R6Хvdm:ߥ!nT>nzS5 ZڐT}6:=btMz<WcMZy1gSъxyahQqV S%BlRU*!clCݠVL*Ȗ92Ԡ 7zY8 Y)7 ltTYT˂@k@3*VI9fh;KUʼ 90X)şHb"lA9=D y1[fD#eHճ2fR3Wma/%b)P_9)Qrw΂WbGBAd#6_\;|m%IR[@qpdGF+J . گ %_ַ]w7ߟ;7[zU> ??f@j[/Cn ?O?~{ d:[w5B=~L1Wb:G=:AJ 73#֒ju2/ 6"T[30]AQ1ՃtebqX+h*p}3'#.z픈^B;i@0 <{mލҌwNu.yˁ|) D)as^ z2l{(CFФ%P>k0Ǧk-í6]W8ksyAqb\cE+p{9BK(#VqbG¥OԖBKcR<҄ IDC{h\7dKQEȜP& POsg*  q7t;xNp T}@o] T<<^{s/×a+:ci* Q~ģK}Q Xq7FAO' q7 P7n~lv{vzJAKϴ2t~u`eq5 zshq5,, !/;Pw|Z5ҽ;@%[G^]CfUE7Z=!XaP.Ri Q9sg.` UHTQ ֑֩ThPTtQ%vfա>N:5еPMc5ƉgL̚>[%kS©S)0E- TXtRx/3M$U͍ ;דy0BNoc[iī=xW CVC+6䲪Fj)JN;ޱ%-C{i߼P]O+[meV\-J?(B|*'+r_4A= k]AmOՍ$GP5JX^ӗo~)I' 1nM$fD1IW2uq"mscj[V WI1cEJ3 SjSY+hw%(i}V֣dTz4̒VR̂"ej(ТDhM"4*8O6Zc`q` 17-t-t9{:.]+<7B/ohPEQx%JrL#蒕),<&\YyΩ3 h))FעU<ݫxà9*޺Ha2fc%<+/xhJ FR ozx*:Wx#WQsh9u Y[ `@RE4! Dj\"ԻDĂEz#cȥXbA!JPQBFYIYb\I JpRJ,7x 8@X_R#w"%DnWCto\ CLk~8ԋ9 *W<F88<7bzi@0"{mލz#LF`j#=a-/wϲDkʤ{38FFͣVȻ.;>1 y!hKF[şQ=M[!WuƓdo?I'/ᦦÉdy2l&Y\W;a/"Z4j:Ǥ?ȊƤ?}m;`RiT+ dbRN`R$tG̾؟jD?d!:A GDqv(cZ'8|jeԃQnSA5_'ƃp^ֆB& no5`Sݕ^ǟeK{?"9vA;PnGΎ& 4k ?`NnVjER}Ȏۤ9nFsn=sS g?kr~ӒrͲ)>nZ4*!zn21wxjik*ڰWn96%G͐U-W1.c`-#һa!D)&Dz:G@-)1eP˸@-cF j# ((MȂ4/ʉ-'Jz}WڈNWPK9%PW#Q}z}jx3̿Dot>2\H^V6Ls(<-2TiPJrazeKYf\%Y. Sl" (opǷ;^?<|݊+k1+D*U DR[,2eATb%k-HBтȇ ގf`j'k-k4q rrOE*FU5T 3!F\OӔBDg-Q׊&D RQvTQ^.)í s0;v<^]le( F ,T Ǵ)Tmj>}:cf/s,;/?࠼$9Z%6=:">zK:l x9aPC{ByxΞW| wZa\:x]jPu:tyxcn_--g} od|;|FLI# Ў 1, 7 d#ɚwqdD鄾ǻሷ·[ M4Ȧq{7n8wxn#"8(>\\⚻E/D۔[掩*qJZ9t1]b=V%nj}sM=";xjb/S#bh;zxjcHlzã狥J(Nj 49z0~KJ1^.b`N)8]Ur"9.Ѕ9.I]R(̈́T*gTgTCQp8NJwDăz\=+kg!=UR@Ƌ,gZ42DH3R\s$cҤ}S]0(4ְ%ؒ aΡݓH >vL-`|MTM%!Ąwix LWZ-K2EyS+U\ťF!DΞ|V[ R[2bQDn=7^QIWѫżW7{qbW0{C1.MAKԤx9q<_j4k-?.sЦ="JAx@YR(xw4 `F<1J'$Jȇ$y$OUeh- \Zivpy–Cq)%db1<]3ya9(d&J EZh;6ܙ5{]g`VZ.mlq&sYLʖv鱜?'*&>NO1SdgR(yZv[*jj?LvnkGfvپrɽNywF|.ޒԆ%x/7BK5/ˌ<+(2HdER,SYjVH a,gO7hdլ:v S))CqvsL(Ԋy "KiSQ*#dN %JRLʔIPftDuZHL5Ņؾ $9nfqecgP `,? 
Jan 20 19:49:50 crc kubenswrapper[4948]: Trace[210143685]: ---"Objects listed" error: 15019ms (19:49:50.695)
Jan 20 19:49:50 crc kubenswrapper[4948]: Trace[210143685]: [15.019280633s] [15.019280633s] END
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.695300 4948 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.695918 4948 csr.go:261] certificate signing request csr-g5ddb is approved, waiting to be issued
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.695964 4948 trace.go:236] Trace[897108543]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (20-Jan-2026 19:49:35.855) (total time: 14840ms):
Jan 20 19:49:50 crc kubenswrapper[4948]: Trace[897108543]: ---"Objects listed" error: 14840ms (19:49:50.695)
Jan 20 19:49:50 crc kubenswrapper[4948]: Trace[897108543]: [14.840260654s] [14.840260654s] END
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.695975 4948 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.696450 4948 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.697750 4948 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.704153 4948 csr.go:257] certificate signing request csr-g5ddb is issued
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.744877 4948 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": EOF" start-of-body=
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.744932 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": EOF"
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.748975 4948 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33206->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.749037 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33206->192.168.126.11:17697: read: connection reset by peer"
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.779318 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.788561 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"]
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.789210 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.798626 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.798685 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.798765 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.798800 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.798835 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.798862 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.798903 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.798975 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799008 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799013 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799035 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799034 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799067 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799091 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799104 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799140 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799168 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799197 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799220 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799254 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799270 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799291 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799326 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799356 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799418 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799418 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799451 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799457 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799483 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799527 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799579 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799613 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799620 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799651 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799684 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799730 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799745 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799808 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799810 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799862 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799875 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799903 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799923 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799941 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799946 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799957 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.799973 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800008 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800032 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800049 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800064 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800069 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800118 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800122 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800149 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800167 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800185 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800204 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800220 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800217 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800239 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800262 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800275 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800281 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800313 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800332 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800351 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800370 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800384 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800399 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800417 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800434 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800448 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800456 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800450 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800472 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800496 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800518 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800538 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800557 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800563 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800575 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800627 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800641 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800648 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800658 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800682 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800693 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800753 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800758 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800795 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800811 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800820 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800824 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800893 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800913 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800925 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800961 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800966 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2".
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.800991 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801018 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801046 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801075 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801073 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801111 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801115 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801149 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801170 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801188 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801206 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801226 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801243 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801259 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801276 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801293 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") 
pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801310 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801328 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801345 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801362 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801378 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801396 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801412 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801427 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801442 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801458 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: 
\"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801476 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801491 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801508 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801532 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801549 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801568 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801584 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801606 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801676 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801693 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" 
(UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801723 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801739 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801754 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801771 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801787 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801803 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801819 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801834 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801849 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801865 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") 
pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801883 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801902 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801917 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801935 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801952 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801969 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801985 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802001 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802017 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802033 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802050 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802066 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802082 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802098 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802112 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802128 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802143 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802158 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802174 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802191 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802221 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802239 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802259 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802274 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802288 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802303 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802320 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802338 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802355 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802374 4948 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802390 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802405 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803232 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803377 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803396 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803414 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803430 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803453 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803469 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803484 4948 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803501 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803517 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803534 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803553 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.803571 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.805500 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.806315 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.806353 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.806374 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.806392 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 20 19:49:50 crc 
kubenswrapper[4948]: I0120 19:49:50.806515 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.806537 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807089 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807117 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807139 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807158 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807178 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807195 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807213 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807231 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 
19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807297 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807317 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807337 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807355 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807374 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807394 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807411 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807428 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807473 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807495 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: 
\"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807516 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807536 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807553 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807570 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807588 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807607 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807628 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807645 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807662 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807683 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807716 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807734 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807773 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807798 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807820 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807840 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807869 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807887 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807908 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807927 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807946 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807966 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814387 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814457 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814485 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814515 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814636 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc 
kubenswrapper[4948]: I0120 19:49:50.814661 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814676 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814687 4948 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814700 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814731 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814742 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814754 4948 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814768 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814778 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814788 4948 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814800 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814811 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814828 4948 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 20 
19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814840 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814850 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814861 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814873 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814883 4948 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814896 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814906 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814916 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814927 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814938 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814953 4948 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814966 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814978 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node 
\"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814993 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815008 4948 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815018 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815030 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815849 4948 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801286 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801476 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801501 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801621 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801777 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801904 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.801959 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.802052 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.805294 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.805648 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.805803 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.805999 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.806531 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.806541 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.806764 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807860 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.817921 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.818113 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.818208 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.818301 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807881 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807972 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.807980 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808098 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808269 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808285 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808374 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808457 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808483 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808521 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808599 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808684 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808867 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.808901 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809095 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809146 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809149 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809290 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809371 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809467 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809568 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809744 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.809842 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). 
InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.810452 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.810724 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.810803 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.811281 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.811299 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.811417 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.813223 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.813627 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.813832 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.813936 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814202 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814347 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814388 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814611 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814627 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814834 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.814978 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.827112 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.827314 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.827734 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.827885 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.828182 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.828333 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815065 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815780 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815891 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815872 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.816051 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.816097 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.816367 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.816396 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.816889 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.817094 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.817313 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.817750 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.818977 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819053 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819109 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819225 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819415 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819460 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819514 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819541 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819802 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.819869 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.820011 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.820302 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.820000 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.820346 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.828987 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.820491 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.820570 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.820823 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.821027 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.821460 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.821549 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.821803 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.821811 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.821984 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.821923 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.822022 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). 
InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.822368 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.822393 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.822641 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.822971 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.822994 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.823403 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.823663 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.823988 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.824144 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.824247 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.824645 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.824802 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.815010 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.828405 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.828553 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.828686 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.828905 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:49:51.328873999 +0000 UTC m=+19.279598968 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.834495 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:51.33445706 +0000 UTC m=+19.285182029 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.834581 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:51.334571903 +0000 UTC m=+19.285296872 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.835796 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.836354 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.836847 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.841345 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.841408 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.841521 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:51.341502619 +0000 UTC m=+19.292227588 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.838787 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.838798 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.839999 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.840253 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.840430 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.840469 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.840555 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.840622 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.840764 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.840933 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.842178 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.842231 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:50 crc kubenswrapper[4948]: E0120 19:49:50.842299 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:51.34229074 +0000 UTC m=+19.293015709 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.848294 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.849090 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.850187 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.850472 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.850801 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.851046 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.854069 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.854098 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.854198 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.855188 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.857849 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.859526 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.859830 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.859902 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.860496 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.860523 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.860787 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.861009 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.861524 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.861684 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.861924 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.863749 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.864390 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.864534 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.864966 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.865019 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.865111 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.865240 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.865427 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.865486 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.869391 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.869690 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.870278 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.870328 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.871033 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.872411 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.874043 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.874581 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.874782 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.874887 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.876715 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.883663 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.883667 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.876125 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.902273 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.903853 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.913452 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915517 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915653 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915797 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod 
\"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915674 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915917 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915934 4948 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915945 4948 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915955 4948 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915964 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915973 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915983 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.915992 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916000 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916009 4948 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916017 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath 
\"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916025 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916034 4948 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916042 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916050 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916058 4948 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916066 4948 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916074 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916082 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916091 4948 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916101 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916110 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916119 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916127 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 
19:49:50.916135 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916144 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916152 4948 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916160 4948 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916170 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916179 4948 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916187 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916196 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916204 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916211 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916219 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916228 4948 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916236 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916243 4948 reconciler_common.go:293] "Volume detached for 
volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916252 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916262 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916271 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916280 4948 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916288 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916296 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916306 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916314 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916322 4948 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916331 4948 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916339 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916349 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc 
kubenswrapper[4948]: I0120 19:49:50.916357 4948 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916366 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916374 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916382 4948 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916391 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916399 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916408 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916416 4948 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916425 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916434 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916441 4948 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916450 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916458 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc 
kubenswrapper[4948]: I0120 19:49:50.916466 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916474 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916483 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916491 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916501 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916510 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916518 4948 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916526 4948 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916534 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916543 4948 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916551 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916559 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916568 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" 
DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916576 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916585 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916594 4948 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916603 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916612 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916621 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916630 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916640 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916650 4948 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916660 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916669 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916679 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916688 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 
19:49:50.916698 4948 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.916736 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917012 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917020 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917029 4948 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917037 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917045 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917053 4948 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917061 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917069 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917078 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917086 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917094 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917102 4948 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917110 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917118 4948 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917127 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917135 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917142 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917150 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917158 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917166 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917174 4948 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917183 4948 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917191 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917198 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917205 4948 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917213 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917221 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917229 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917238 4948 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917245 4948 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917252 4948 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917260 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917269 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917277 4948 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917284 4948 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917291 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917298 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917307 4948 reconciler_common.go:293] "Volume detached for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917314 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917322 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917330 4948 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917338 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917345 4948 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917353 4948 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917361 4948 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917369 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917376 4948 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917385 4948 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917393 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917400 4948 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917407 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: 
\"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917417 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917424 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917431 4948 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917439 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917447 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917454 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917462 4948 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917472 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917480 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917489 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917497 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917505 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917512 4948 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: 
\"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917520 4948 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917529 4948 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917537 4948 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917545 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917553 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917561 4948 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917570 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.917578 4948 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.925332 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.933747 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.940948 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.950103 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.957034 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.965854 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:50 crc kubenswrapper[4948]: I0120 19:49:50.974985 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.110264 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.119655 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.129652 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.139392 4948 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 20 19:49:51 crc kubenswrapper[4948]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash Jan 20 19:49:51 crc kubenswrapper[4948]: set -o allexport Jan 20 19:49:51 crc kubenswrapper[4948]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then Jan 20 19:49:51 crc kubenswrapper[4948]: source /etc/kubernetes/apiserver-url.env Jan 20 19:49:51 crc kubenswrapper[4948]: else Jan 20 19:49:51 crc kubenswrapper[4948]: echo "Error: /etc/kubernetes/apiserver-url.env is missing" Jan 20 19:49:51 crc kubenswrapper[4948]: exit 1 Jan 20 19:49:51 crc kubenswrapper[4948]: fi Jan 20 19:49:51 crc kubenswrapper[4948]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104 Jan 20 19:49:51 crc kubenswrapper[4948]: 
],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 20 19:49:51 crc kubenswrapper[4948]: > logger="UnhandledError" Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.140524 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312" Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.146569 4948 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 20 19:49:51 crc kubenswrapper[4948]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Jan 20 19:49:51 crc kubenswrapper[4948]: if [[ -f "/env/_master" ]]; then Jan 20 19:49:51 crc kubenswrapper[4948]: set -o allexport Jan 20 19:49:51 crc kubenswrapper[4948]: source "/env/_master" Jan 20 19:49:51 crc kubenswrapper[4948]: set +o allexport Jan 20 19:49:51 crc kubenswrapper[4948]: fi Jan 20 19:49:51 crc kubenswrapper[4948]: # OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled. 
Jan 20 19:49:51 crc kubenswrapper[4948]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791 Jan 20 19:49:51 crc kubenswrapper[4948]: ho_enable="--enable-hybrid-overlay" Jan 20 19:49:51 crc kubenswrapper[4948]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook" Jan 20 19:49:51 crc kubenswrapper[4948]: # extra-allowed-user: service account `ovn-kubernetes-control-plane` Jan 20 19:49:51 crc kubenswrapper[4948]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager) Jan 20 19:49:51 crc kubenswrapper[4948]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Jan 20 19:49:51 crc kubenswrapper[4948]: --webhook-cert-dir="/etc/webhook-cert" \ Jan 20 19:49:51 crc kubenswrapper[4948]: --webhook-host=127.0.0.1 \ Jan 20 19:49:51 crc kubenswrapper[4948]: --webhook-port=9743 \ Jan 20 19:49:51 crc kubenswrapper[4948]: ${ho_enable} \ Jan 20 19:49:51 crc kubenswrapper[4948]: --enable-interconnect \ Jan 20 19:49:51 crc kubenswrapper[4948]: --disable-approver \ Jan 20 19:49:51 crc kubenswrapper[4948]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \ Jan 20 19:49:51 crc kubenswrapper[4948]: --wait-for-kubernetes-api=200s \ Jan 20 19:49:51 crc kubenswrapper[4948]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \ Jan 20 19:49:51 crc kubenswrapper[4948]: --loglevel="${LOGLEVEL}" Jan 20 19:49:51 crc kubenswrapper[4948]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct 
envvars Jan 20 19:49:51 crc kubenswrapper[4948]: > logger="UnhandledError" Jan 20 19:49:51 crc kubenswrapper[4948]: W0120 19:49:51.147254 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-69150fd3935f2fc8f1ca8cb84069d383a21d9f38a9b938e89718007510ea857c WatchSource:0}: Error finding container 69150fd3935f2fc8f1ca8cb84069d383a21d9f38a9b938e89718007510ea857c: Status 404 returned error can't find the container with id 69150fd3935f2fc8f1ca8cb84069d383a21d9f38a9b938e89718007510ea857c Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.149125 4948 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 20 19:49:51 crc kubenswrapper[4948]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Jan 20 19:49:51 crc kubenswrapper[4948]: if [[ -f "/env/_master" ]]; then Jan 20 19:49:51 crc kubenswrapper[4948]: set -o allexport Jan 20 19:49:51 crc kubenswrapper[4948]: source "/env/_master" Jan 20 19:49:51 crc kubenswrapper[4948]: set +o allexport Jan 20 19:49:51 crc kubenswrapper[4948]: fi Jan 20 19:49:51 crc kubenswrapper[4948]: Jan 20 19:49:51 crc kubenswrapper[4948]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Jan 20 19:49:51 crc kubenswrapper[4948]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Jan 20 19:49:51 crc kubenswrapper[4948]: --disable-webhook \ Jan 20 19:49:51 crc kubenswrapper[4948]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Jan 20 19:49:51 crc kubenswrapper[4948]: --loglevel="${LOGLEVEL}" Jan 20 19:49:51 crc kubenswrapper[4948]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Jan 20 19:49:51 crc kubenswrapper[4948]: > 
logger="UnhandledError" Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.149860 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.151054 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.151095 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.374210 4948 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.421994 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.422067 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.422093 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.422118 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422140 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:49:52.42212021 +0000 UTC m=+20.372845189 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.422169 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422215 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422232 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422236 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422284 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422294 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422301 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422311 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:52.422294675 +0000 UTC m=+20.373019644 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422247 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422333 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:52.422326166 +0000 UTC m=+20.373051135 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422358 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:52.422353006 +0000 UTC m=+20.373077975 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422244 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:51 crc kubenswrapper[4948]: E0120 19:49:51.422388 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:52.422382777 +0000 UTC m=+20.373107736 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.505174 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 14:15:16.306783617 +0000 UTC Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.916367 4948 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-20 19:44:50 +0000 UTC, rotation deadline is 2026-12-07 07:49:52.750247613 +0000 UTC Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.916408 4948 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7692h0m0.833843333s for next certificate rotation Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.919689 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5b338ea0bf3e1ed831e2af76b7d71d39dc41f9a34a5e4382f8f573e33673c291"} Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.921284 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"4896fbe384310c7851c7a81273d81b4f5c9a5837101c46cc89dfa2d77aa5d6ed"} Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.923385 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.925211 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d" exitCode=255 Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.925275 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d"} Jan 20 19:49:51 crc kubenswrapper[4948]: I0120 19:49:51.929608 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"69150fd3935f2fc8f1ca8cb84069d383a21d9f38a9b938e89718007510ea857c"} Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.004603 4948 scope.go:117] "RemoveContainer" containerID="095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.005129 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.005608 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.005652 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-tx5bt"] Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.005963 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-xg4hv"] Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.006223 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.006526 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-tx5bt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.018900 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.018963 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.019087 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.019185 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.019256 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.019329 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.019390 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.019472 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.019588 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.040255 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.063179 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.085970 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.104270 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.119783 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d2ed1457-1153-41b5-8cbc-56599eeecba5-hosts-file\") pod \"node-resolver-tx5bt\" (UID: \"d2ed1457-1153-41b5-8cbc-56599eeecba5\") " pod="openshift-dns/node-resolver-tx5bt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.119819 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-proxy-tls\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.119866 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ks7vm\" (UniqueName: \"kubernetes.io/projected/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-kube-api-access-ks7vm\") pod 
\"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.119891 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4wlr\" (UniqueName: \"kubernetes.io/projected/d2ed1457-1153-41b5-8cbc-56599eeecba5-kube-api-access-d4wlr\") pod \"node-resolver-tx5bt\" (UID: \"d2ed1457-1153-41b5-8cbc-56599eeecba5\") " pod="openshift-dns/node-resolver-tx5bt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.119906 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-rootfs\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.119966 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-mcd-auth-proxy-config\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.131613 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.142151 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.154129 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20
T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.172305 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.194980 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.214004 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.224161 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d2ed1457-1153-41b5-8cbc-56599eeecba5-hosts-file\") pod \"node-resolver-tx5bt\" (UID: \"d2ed1457-1153-41b5-8cbc-56599eeecba5\") " pod="openshift-dns/node-resolver-tx5bt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.224227 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-proxy-tls\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.224281 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ks7vm\" (UniqueName: \"kubernetes.io/projected/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-kube-api-access-ks7vm\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.224305 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4wlr\" (UniqueName: \"kubernetes.io/projected/d2ed1457-1153-41b5-8cbc-56599eeecba5-kube-api-access-d4wlr\") pod \"node-resolver-tx5bt\" (UID: \"d2ed1457-1153-41b5-8cbc-56599eeecba5\") " pod="openshift-dns/node-resolver-tx5bt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.224325 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-rootfs\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.224389 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-mcd-auth-proxy-config\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: 
I0120 19:49:52.224819 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d2ed1457-1153-41b5-8cbc-56599eeecba5-hosts-file\") pod \"node-resolver-tx5bt\" (UID: \"d2ed1457-1153-41b5-8cbc-56599eeecba5\") " pod="openshift-dns/node-resolver-tx5bt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.224881 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-rootfs\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.225584 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-mcd-auth-proxy-config\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.230892 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-proxy-tls\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.233499 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.245664 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4wlr\" (UniqueName: \"kubernetes.io/projected/d2ed1457-1153-41b5-8cbc-56599eeecba5-kube-api-access-d4wlr\") pod \"node-resolver-tx5bt\" (UID: \"d2ed1457-1153-41b5-8cbc-56599eeecba5\") " pod="openshift-dns/node-resolver-tx5bt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.247934 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ks7vm\" (UniqueName: \"kubernetes.io/projected/6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1-kube-api-access-ks7vm\") pod \"machine-config-daemon-xg4hv\" (UID: \"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\") " pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.251764 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.273111 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.290947 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.314157 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.339122 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.352974 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.374643 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-tx5bt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.394942 4948 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 20 19:49:52 crc kubenswrapper[4948]: W0120 19:49:52.395143 4948 reflector.go:484] object-"openshift-dns"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 19:49:52 crc kubenswrapper[4948]: W0120 19:49:52.395742 4948 reflector.go:484] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": watch of *v1.Secret ended with: very short watch: object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": Unexpected watch close - watch lasted less than a second and no items received Jan 20 19:49:52 crc kubenswrapper[4948]: W0120 19:49:52.396169 4948 reflector.go:484] object-"openshift-machine-config-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 19:49:52 crc kubenswrapper[4948]: W0120 19:49:52.396198 4948 reflector.go:484] object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": watch of *v1.Secret ended with: very short watch: object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": Unexpected watch close - watch lasted less than a second and no items received Jan 20 19:49:52 crc kubenswrapper[4948]: W0120 19:49:52.396757 4948 reflector.go:484] object-"openshift-machine-config-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 19:49:52 crc kubenswrapper[4948]: W0120 19:49:52.397075 4948 reflector.go:484] object-"openshift-machine-config-operator"/"proxy-tls": watch of *v1.Secret ended with: very short watch: object-"openshift-machine-config-operator"/"proxy-tls": Unexpected watch close - watch lasted less than a second and no items received Jan 20 19:49:52 crc kubenswrapper[4948]: W0120 19:49:52.397105 4948 reflector.go:484] object-"openshift-machine-config-operator"/"kube-rbac-proxy": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-machine-config-operator"/"kube-rbac-proxy": Unexpected watch close - watch lasted less than a second and no items received Jan 20 19:49:52 crc kubenswrapper[4948]: W0120 19:49:52.397132 4948 reflector.go:484] object-"openshift-dns"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-dns"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.544662 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 20:09:17.704171268 +0000 UTC Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.544840 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.544915 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.544941 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.544958 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.544978 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545085 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545098 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545109 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545151 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:54.545138411 +0000 UTC m=+22.495863380 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545380 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545422 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:49:54.545414618 +0000 UTC m=+22.496139587 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545449 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:54.545432748 +0000 UTC m=+22.496157767 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545452 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545468 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545491 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:54.5454861 +0000 UTC m=+22.496211069 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545494 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545506 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.545554 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:54.545539741 +0000 UTC m=+22.496264710 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.570869 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.570998 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.571058 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.571099 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.571137 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:52 crc kubenswrapper[4948]: E0120 19:49:52.571174 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.576751 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.577383 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.578630 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.586238 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.586941 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.587467 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.588228 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.588797 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.589396 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.591020 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.591510 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.592779 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.592964 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.598848 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.605163 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.607570 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.608309 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.624822 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.625635 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.628137 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.628572 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.629763 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.631450 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.636829 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.637618 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.639751 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" 
path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.643560 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.663067 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.664478 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.665349 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.666833 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.667641 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.670319 4948 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.670439 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.674357 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.675828 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.676326 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.679294 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.681694 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.682455 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" 
path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.683898 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.684807 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.685442 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.686649 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.687902 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.753803 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.754576 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.756365 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.757547 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.758342 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.759327 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.772290 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.772843 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.773962 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.774520 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.775406 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.775863 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-qttfm"] Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.776828 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-ms8h8"] Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.776905 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.777631 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.784094 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.784273 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.784518 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.784567 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.784530 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.784750 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.784821 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.785341 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.823840 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941456 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q6jt\" (UniqueName: \"kubernetes.io/projected/c6c006e4-2994-4ab8-bdfc-90703054f20d-kube-api-access-4q6jt\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941590 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-system-cni-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941608 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-os-release\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941627 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prr4t\" (UniqueName: \"kubernetes.io/projected/e21ac8a2-1e79-4191-b809-75085d432b31-kube-api-access-prr4t\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941646 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e21ac8a2-1e79-4191-b809-75085d432b31-multus-daemon-config\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941661 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-socket-dir-parent\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941676 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-hostroot\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941723 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c6c006e4-2994-4ab8-bdfc-90703054f20d-cni-binary-copy\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941736 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-cni-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941752 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-cni-multus\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941770 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941788 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-cnibin\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941829 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/e21ac8a2-1e79-4191-b809-75085d432b31-cni-binary-copy\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.941859 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-conf-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942019 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-netns\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942075 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-multus-certs\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942147 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-kubelet\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942176 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/c6c006e4-2994-4ab8-bdfc-90703054f20d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942196 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-cni-bin\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942227 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-system-cni-dir\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942246 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-os-release\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942300 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-k8s-cni-cncf-io\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942323 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-etc-kubernetes\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.942346 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-cnibin\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.964403 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d"} Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.966208 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"3bbf9255540d0676fd063d6a33c763b404f06437ffe6f385fe14257e59087985"} Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.972619 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.983526 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821"} Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.983715 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.985320 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c"} Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.985338 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a"} Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.986838 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-tx5bt" event={"ID":"d2ed1457-1153-41b5-8cbc-56599eeecba5","Type":"ContainerStarted","Data":"137dd7740ed19a10863c93270d0b74b226120d3550c6354736b2813fbb6402b5"} Jan 20 19:49:52 crc kubenswrapper[4948]: I0120 19:49:52.991832 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-che
ck-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043022 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043364 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-cnibin\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043449 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-cnibin\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 
19:49:53.043466 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043562 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/e21ac8a2-1e79-4191-b809-75085d432b31-cni-binary-copy\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043598 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-conf-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043634 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-netns\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043660 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-multus-certs\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043685 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-kubelet\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043716 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-system-cni-dir\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043732 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-os-release\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043749 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/c6c006e4-2994-4ab8-bdfc-90703054f20d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043780 4948 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-cni-bin\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043819 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-k8s-cni-cncf-io\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043840 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-etc-kubernetes\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043855 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-cnibin\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043882 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q6jt\" (UniqueName: \"kubernetes.io/projected/c6c006e4-2994-4ab8-bdfc-90703054f20d-kube-api-access-4q6jt\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043898 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-system-cni-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043913 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-os-release\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043928 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prr4t\" (UniqueName: \"kubernetes.io/projected/e21ac8a2-1e79-4191-b809-75085d432b31-kube-api-access-prr4t\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043943 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e21ac8a2-1e79-4191-b809-75085d432b31-multus-daemon-config\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043965 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: 
\"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-socket-dir-parent\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.043979 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-hostroot\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.044010 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c6c006e4-2994-4ab8-bdfc-90703054f20d-cni-binary-copy\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.044038 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-cni-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.044054 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-cni-multus\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.044553 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-etc-kubernetes\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.044602 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-conf-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.044650 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-netns\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.044691 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-multus-certs\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.045038 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/e21ac8a2-1e79-4191-b809-75085d432b31-cni-binary-copy\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 
19:49:53.045081 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-cnibin\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.045087 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-kubelet\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.045156 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-system-cni-dir\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.045364 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-system-cni-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.045450 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-os-release\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.045458 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c6c006e4-2994-4ab8-bdfc-90703054f20d-os-release\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046042 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/e21ac8a2-1e79-4191-b809-75085d432b31-multus-daemon-config\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046105 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-socket-dir-parent\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046129 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-hostroot\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046448 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: 
\"kubernetes.io/configmap/c6c006e4-2994-4ab8-bdfc-90703054f20d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046506 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-cni-bin\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046551 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c6c006e4-2994-4ab8-bdfc-90703054f20d-cni-binary-copy\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046651 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-multus-cni-dir\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046679 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-var-lib-cni-multus\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.046872 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/e21ac8a2-1e79-4191-b809-75085d432b31-host-run-k8s-cni-cncf-io\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.321254 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q6jt\" (UniqueName: \"kubernetes.io/projected/c6c006e4-2994-4ab8-bdfc-90703054f20d-kube-api-access-4q6jt\") pod \"multus-additional-cni-plugins-ms8h8\" (UID: \"c6c006e4-2994-4ab8-bdfc-90703054f20d\") " pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.329842 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.334028 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.359817 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prr4t\" (UniqueName: \"kubernetes.io/projected/e21ac8a2-1e79-4191-b809-75085d432b31-kube-api-access-prr4t\") pod \"multus-qttfm\" (UID: \"e21ac8a2-1e79-4191-b809-75085d432b31\") " pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.363507 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rtkhq"] Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.364227 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.364586 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.385940 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.392786 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.398867 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.399736 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.399929 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-qttfm" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.410059 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.410568 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.410723 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.437968 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.441813 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552419 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55f6g\" (UniqueName: \"kubernetes.io/projected/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-kube-api-access-55f6g\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552735 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-log-socket\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552757 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-bin\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552773 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-kubelet\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552790 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552821 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-etc-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552837 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-ovn\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552851 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-ovn-kubernetes\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552865 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" 
(UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-env-overrides\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552878 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552892 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-node-log\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552906 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-config\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552920 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-var-lib-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552934 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-script-lib\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552955 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-netns\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552967 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-netd\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552981 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-systemd-units\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.552994 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-systemd\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.553007 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-slash\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.553021 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovn-node-metrics-cert\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.553122 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 13:40:40.984983123 +0000 UTC Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.561516 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.561508 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.636131 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679715 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-netns\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679750 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-netd\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679775 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-systemd-units\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679789 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-systemd\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679804 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-slash\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679819 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovn-node-metrics-cert\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679833 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55f6g\" (UniqueName: \"kubernetes.io/projected/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-kube-api-access-55f6g\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679862 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-log-socket\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679883 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-bin\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679896 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-kubelet\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679916 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679944 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-etc-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679956 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-ovn\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 
19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679970 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-ovn-kubernetes\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679984 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-env-overrides\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.679997 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.680011 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-node-log\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.680026 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-var-lib-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.680234 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-config\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.680256 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-script-lib\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.680915 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-kubelet\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681043 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-netns\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681052 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-script-lib\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681079 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-netd\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681110 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681112 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-systemd-units\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681138 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-systemd\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681162 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-node-log\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681173 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-slash\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681191 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-var-lib-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681400 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-env-overrides\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681461 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681488 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-etc-openvswitch\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681509 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-ovn\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681532 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-ovn-kubernetes\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681571 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-log-socket\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681642 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-config\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.681678 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-bin\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.705632 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.806203 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.807179 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovn-node-metrics-cert\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.814234 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.831540 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55f6g\" (UniqueName: \"kubernetes.io/projected/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-kube-api-access-55f6g\") pod \"ovnkube-node-rtkhq\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.833912 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.857539 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.882531 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.885916 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.890622 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.896571 4948 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.898284 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.898773 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.898816 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.898825 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.898969 4948 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.910374 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.911623 4948 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.911867 4948 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.912677 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.912769 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.912786 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.912803 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.912814 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:53Z","lastTransitionTime":"2026-01-20T19:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.920174 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: E0120 19:49:53.935827 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.937496 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.940817 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.940843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.940853 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.940866 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.940890 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:53Z","lastTransitionTime":"2026-01-20T19:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.952591 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: E0120 19:49:53.952801 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.958333 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.958402 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.958417 4948 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.958456 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.958477 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:53Z","lastTransitionTime":"2026-01-20T19:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.967578 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: E0120 19:49:53.968826 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.977449 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.977490 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.977523 4948 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.977542 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.977554 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:53Z","lastTransitionTime":"2026-01-20T19:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.988827 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.988863 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:49:53 crc kubenswrapper[4948]: E0120 19:49:53.988968 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.992184 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-tx5bt" event={"ID":"d2ed1457-1153-41b5-8cbc-56599eeecba5","Type":"ContainerStarted","Data":"c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.992767 4948 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.992797 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.992806 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.992819 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.992827 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:53Z","lastTransitionTime":"2026-01-20T19:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.993458 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qttfm" event={"ID":"e21ac8a2-1e79-4191-b809-75085d432b31","Type":"ContainerStarted","Data":"9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.993497 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qttfm" event={"ID":"e21ac8a2-1e79-4191-b809-75085d432b31","Type":"ContainerStarted","Data":"e9848ab004fb59a2161f242b84fd212ca5273778d7b2d1fb49cfdb1770839159"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.995264 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.995292 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.996690 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerStarted","Data":"29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130"} Jan 20 19:49:53 crc kubenswrapper[4948]: I0120 19:49:53.996730 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerStarted","Data":"895da787239e83b3f041dc7c6510f9b6e2ea580d5c686bb1f37eb196cd24b21c"} Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.004857 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.010258 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" 
Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.010530 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.013388 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.013408 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.013416 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.013428 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.013454 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:54Z","lastTransitionTime":"2026-01-20T19:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.027422 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.035355 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.047638 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.057028 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.133521 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.133549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.133558 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.133570 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.133580 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:54Z","lastTransitionTime":"2026-01-20T19:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.146004 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.160971 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.282173 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.301745 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.301779 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.301791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.301806 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.301818 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:54Z","lastTransitionTime":"2026-01-20T19:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.391841 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.403652 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.403694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.403723 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.403740 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.403750 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:54Z","lastTransitionTime":"2026-01-20T19:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.405150 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.476936 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\
\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\
\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.489459 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.564493 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 18:33:13.711051682 +0000 UTC Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.564559 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.565975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.566011 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.566021 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.566037 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.566047 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:54Z","lastTransitionTime":"2026-01-20T19:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.569643 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.569770 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.570072 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.570131 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.570178 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.570229 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.583036 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.585850 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.585949 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.585977 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.585996 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586031 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:49:58.586002606 +0000 UTC m=+26.536727595 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.586089 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586113 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586130 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586140 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586163 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586181 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:58.58616903 +0000 UTC m=+26.536893999 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586274 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:58.586252063 +0000 UTC m=+26.536977112 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586280 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586293 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586376 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:58.586354825 +0000 UTC m=+26.537079844 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586298 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586407 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:54 crc kubenswrapper[4948]: E0120 19:49:54.586442 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 19:49:58.586434578 +0000 UTC m=+26.537159647 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.602358 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.605912 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.608844 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.618993 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.632062 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.643526 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.678997 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.702826 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.706854 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.706882 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.706894 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.706909 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.706920 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:54Z","lastTransitionTime":"2026-01-20T19:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.737992 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.758854 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.773099 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.787099 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.808098 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\"
:\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd
\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.808885 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.808916 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.808926 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.808939 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.808950 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:54Z","lastTransitionTime":"2026-01-20T19:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.825273 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.840371 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.858494 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.865926 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-g49xj"] Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.866632 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.871360 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.874058 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.874473 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.874747 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.886096 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.900906 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.910823 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.910871 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.910879 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.910896 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.910908 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:54Z","lastTransitionTime":"2026-01-20T19:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.916821 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.931991 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servi
ceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.956757 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e99947581
3045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.971412 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.981641 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.988584 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-serviceca\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.988771 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7th5\" (UniqueName: \"kubernetes.io/projected/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-kube-api-access-x7th5\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.988855 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-host\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:54 crc kubenswrapper[4948]: I0120 19:49:54.991553 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:54Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.000958 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b" exitCode=0 Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.001219 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.001286 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"5d37dbd9945b60a07b3620d4062a5cdd679c3caf924483de9be86f15dbe3b8a8"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.012725 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.012779 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.012789 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.012800 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.012809 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.012817 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.042716 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.055373 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.106641 4948 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-serviceca\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.106742 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7th5\" (UniqueName: \"kubernetes.io/projected/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-kube-api-access-x7th5\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.106829 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-host\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.108928 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-serviceca\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.110153 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-host\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.114998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.115142 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.115229 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.115341 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.115941 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.150963 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7th5\" (UniqueName: \"kubernetes.io/projected/2bc5bb03-140b-42e9-a874-a6f4b6baeac0-kube-api-access-x7th5\") pod \"node-ca-g49xj\" (UID: \"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\") " pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.155081 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.166236 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.181806 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.182050 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-g49xj" Jan 20 19:49:55 crc kubenswrapper[4948]: W0120 19:49:55.196576 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2bc5bb03_140b_42e9_a874_a6f4b6baeac0.slice/crio-d7e76dd40dc129d1ed9d0b6453c298a34636dc6b986fa66c238be0dedf28ca1e WatchSource:0}: Error finding container d7e76dd40dc129d1ed9d0b6453c298a34636dc6b986fa66c238be0dedf28ca1e: Status 404 returned error can't find the container with id d7e76dd40dc129d1ed9d0b6453c298a34636dc6b986fa66c238be0dedf28ca1e Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.217646 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.217953 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.218038 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.218137 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.218223 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.219010 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.244536 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.288435 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.320518 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.331024 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.331060 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.331069 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.331083 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.331092 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.350250 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.12
6.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.366818 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.382637 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.398297 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.410345 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.425402 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f
6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.433582 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.433617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.433628 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.433643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.433653 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.441532 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.453500 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.465637 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.475782 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.494697 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.511540 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z 
is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.519759 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.535648 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.535690 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.535699 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.535727 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.535737 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.539082 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.565442 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 06:03:34.630407432 +0000 UTC Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.638320 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.638377 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.638387 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.638401 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.638411 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.703214 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.725717 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.741131 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.741176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.741186 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.741206 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.741219 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.846409 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.846437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.846446 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.846458 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.846467 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.948942 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.948984 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.948992 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.949007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:55 crc kubenswrapper[4948]: I0120 19:49:55.949016 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:55Z","lastTransitionTime":"2026-01-20T19:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.011060 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.012307 4948 generic.go:334] "Generic (PLEG): container finished" podID="c6c006e4-2994-4ab8-bdfc-90703054f20d" containerID="29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130" exitCode=0 Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.012383 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerDied","Data":"29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.013551 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-g49xj" event={"ID":"2bc5bb03-140b-42e9-a874-a6f4b6baeac0","Type":"ContainerStarted","Data":"d7e76dd40dc129d1ed9d0b6453c298a34636dc6b986fa66c238be0dedf28ca1e"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.017095 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.042537 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.050722 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.050757 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.050768 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.050786 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.050799 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.059383 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.074967 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.097304 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.111612 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.124036 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.135679 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.149869 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.154835 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.154863 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.154871 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.154896 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.154907 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.163452 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.190639 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z 
is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.205937 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.227412 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.240017 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.252060 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.257607 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.257651 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.257659 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.257675 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.257684 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.266820 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.278145 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.300859 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.321117 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.360091 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.361202 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.361316 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.361397 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.361458 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.361513 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.383690 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287
b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.463441 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.463479 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.463487 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.463506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.463515 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.471905 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.496136 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.565675 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.565735 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.565752 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.565768 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.565779 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.566202 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 02:41:03.992415558 +0000 UTC Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.569976 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:56 crc kubenswrapper[4948]: E0120 19:49:56.570169 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.570676 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:56 crc kubenswrapper[4948]: E0120 19:49:56.570785 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.570852 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:56 crc kubenswrapper[4948]: E0120 19:49:56.570907 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.574508 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.649966 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.667577 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.667642 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.667661 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.667688 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.667715 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.674481 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.718101 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.742543 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.764429 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.778792 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.778846 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.778859 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.778887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.778899 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.779392 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.793972 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:56Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.942873 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.942942 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.942951 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.942964 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:56 crc kubenswrapper[4948]: I0120 19:49:56.942973 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:56Z","lastTransitionTime":"2026-01-20T19:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.029163 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerStarted","Data":"c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44"} Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.030794 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-g49xj" event={"ID":"2bc5bb03-140b-42e9-a874-a6f4b6baeac0","Type":"ContainerStarted","Data":"7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936"} Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.033106 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7"} Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.033330 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f"} Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.046544 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.046587 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.046597 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.046613 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.046626 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:57Z","lastTransitionTime":"2026-01-20T19:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.269062 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.283167 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.283204 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.283218 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.283233 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.283246 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:57Z","lastTransitionTime":"2026-01-20T19:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.283911 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.392606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.392634 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.392642 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.392656 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.392666 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:57Z","lastTransitionTime":"2026-01-20T19:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.395944 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.427549 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.442555 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.455046 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.470102 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.487168 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.586202 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 00:36:05.899013934 +0000 UTC
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.586430 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.586459 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.586467 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.586480 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.586488 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:57Z","lastTransitionTime":"2026-01-20T19:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.586545 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.604925 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.615522 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.631938 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.655609 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.665639 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z"
Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.681839 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.717007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.717042 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.717050 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.717064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.717072 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:57Z","lastTransitionTime":"2026-01-20T19:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.766607 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.799159 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.925213 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.941639 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.956640 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.967358 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.971781 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.971809 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.971818 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.971832 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.971842 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:57Z","lastTransitionTime":"2026-01-20T19:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:57 crc kubenswrapper[4948]: I0120 19:49:57.978778 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.001943 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/hos
t/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restart
Count\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:57Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.054160 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a"} Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.126647 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:58Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.128999 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.129033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.129043 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.129057 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.129066 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:58Z","lastTransitionTime":"2026-01-20T19:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.149301 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:58Z 
is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.230790 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:58Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.232570 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.232730 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.232812 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.232884 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.232940 4948 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:58Z","lastTransitionTime":"2026-01-20T19:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.244679 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluste
r-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting 
DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:58Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.263644 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:58Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.284267 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:49:58Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.339925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.339954 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.339963 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.339975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.339984 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:58Z","lastTransitionTime":"2026-01-20T19:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.361560 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19
:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:58Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.476257 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.476499 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.476565 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.476633 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.476702 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:58Z","lastTransitionTime":"2026-01-20T19:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.586976 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 01:21:40.760360561 +0000 UTC Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.596246 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.596355 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.596425 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.596487 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.596548 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:58Z","lastTransitionTime":"2026-01-20T19:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.630378 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.630485 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.630580 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.630764 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.630886 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.631005 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.636198 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.636387 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:50:06.636368457 +0000 UTC m=+34.587093416 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.636501 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.636596 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.636679 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.636760 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.636870 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.636959 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:06.636949973 +0000 UTC m=+34.587674942 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.636998 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637198 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:06.637189829 +0000 UTC m=+34.587914798 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637248 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637301 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637326 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637415 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:06.637386205 +0000 UTC m=+34.588111214 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637137 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637501 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637524 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:58 crc kubenswrapper[4948]: E0120 19:49:58.637581 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:06.637561799 +0000 UTC m=+34.588286828 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.699506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.699835 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.699907 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.699998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.700082 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:58Z","lastTransitionTime":"2026-01-20T19:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.832733 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.832952 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.833014 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.833076 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.833133 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:58Z","lastTransitionTime":"2026-01-20T19:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.935936 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.936174 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.936269 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.936389 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:58 crc kubenswrapper[4948]: I0120 19:49:58.936487 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:58Z","lastTransitionTime":"2026-01-20T19:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.039241 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.039297 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.039306 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.039319 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.039328 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.066243 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.066319 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.068568 4948 generic.go:334] "Generic (PLEG): container finished" podID="c6c006e4-2994-4ab8-bdfc-90703054f20d" containerID="c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44" exitCode=0 Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.068625 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerDied","Data":"c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.087868 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.104325 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.120220 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.131912 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.144418 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.144457 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.144468 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.144483 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.144498 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.147293 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.160417 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\
"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.178911 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z 
is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.190574 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.203395 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.216929 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.227604 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.237548 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.246229 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.246264 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.246273 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.246287 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.246296 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.257959 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.270168 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.281938 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:49:59Z is after 2025-08-24T17:21:41Z" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.349013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.349059 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.349071 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.349089 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.349101 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.452868 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.452909 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.452922 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.452938 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.452950 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.555352 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.555397 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.555410 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.555438 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.555450 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.587119 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 01:28:54.863964119 +0000 UTC Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.658014 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.658091 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.658114 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.658139 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.658157 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.760974 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.761037 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.761060 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.761091 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.761113 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.864302 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.864360 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.864385 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.864416 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.864439 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.967158 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.967211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.967227 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.967251 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:49:59 crc kubenswrapper[4948]: I0120 19:49:59.967268 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:49:59Z","lastTransitionTime":"2026-01-20T19:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.070064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.070102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.070113 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.070127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.070138 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.074786 4948 generic.go:334] "Generic (PLEG): container finished" podID="c6c006e4-2994-4ab8-bdfc-90703054f20d" containerID="19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9" exitCode=0 Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.074829 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerDied","Data":"19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.095969 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.110569 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.129180 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.153362 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.164431 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.171861 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.171909 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.171925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.171947 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.171963 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.188432 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.206861 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.234500 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07
b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.254122 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.269038 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.274925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.274961 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.274970 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.274984 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.274994 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.285615 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.307320 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.322306 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.334319 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.345229 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:00Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.376763 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.376814 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.376823 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.376836 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.376844 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.479229 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.479262 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.479271 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.479283 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.479292 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.569034 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:00 crc kubenswrapper[4948]: E0120 19:50:00.569254 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.569374 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:00 crc kubenswrapper[4948]: E0120 19:50:00.569511 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.569575 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:00 crc kubenswrapper[4948]: E0120 19:50:00.569743 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.581888 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.581921 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.581932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.581948 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.581959 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.588187 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 00:48:10.098487018 +0000 UTC Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.684797 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.684926 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.684951 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.684982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.685005 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.788463 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.788535 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.788559 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.788587 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.788609 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.891391 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.891444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.891462 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.891485 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.891502 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.993539 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.993595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.993610 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.993632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:00 crc kubenswrapper[4948]: I0120 19:50:00.993651 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:00Z","lastTransitionTime":"2026-01-20T19:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.084635 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.087601 4948 generic.go:334] "Generic (PLEG): container finished" podID="c6c006e4-2994-4ab8-bdfc-90703054f20d" containerID="c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7" exitCode=0 Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.087642 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerDied","Data":"c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.096449 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.096506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.096520 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.096576 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.096589 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.108416 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.127521 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.150555 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.166161 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.178529 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.191012 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.199144 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.199182 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.199192 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.199207 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.199218 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.203041 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.223038 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.238574 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.252359 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.265525 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.278963 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.291211 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.301081 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.301110 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.301119 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.301137 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.301147 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.303237 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.320603 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-relea
se\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":
{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:01Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.403763 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.403938 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.404002 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.404019 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.404030 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.506934 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.506960 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.506970 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.506982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.506991 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.588507 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 03:40:26.490338696 +0000 UTC Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.609790 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.609832 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.609844 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.609862 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.609877 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.711887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.711951 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.711971 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.711998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.712016 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.814382 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.814438 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.814455 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.814473 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.814486 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.917295 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.917332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.917344 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.917359 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:01 crc kubenswrapper[4948]: I0120 19:50:01.917370 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:01Z","lastTransitionTime":"2026-01-20T19:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.019762 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.019810 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.019825 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.019845 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.019860 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.094217 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerStarted","Data":"712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.110109 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.123187 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.123233 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.123247 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.123265 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.123297 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.125225 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.141928 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-relea
se\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":
{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.155276 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.165953 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.180021 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.191765 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.206438 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.223152 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.225872 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.225904 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.225915 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.225929 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.225938 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.233285 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.245214 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.257295 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.268961 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.287062 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\
\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.298457 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.360005 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.360030 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.360037 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.360051 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.360059 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.462357 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.462390 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.462397 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.462410 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.462419 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.566404 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.566481 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.566505 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.566544 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.566567 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.569867 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:02 crc kubenswrapper[4948]: E0120 19:50:02.570075 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.570131 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:02 crc kubenswrapper[4948]: E0120 19:50:02.570339 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.570396 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:02 crc kubenswrapper[4948]: E0120 19:50:02.570605 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.588923 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 10:41:33.377126332 +0000 UTC Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.589109 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.611571 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.644592 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z 
is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.677778 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.679329 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.679423 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.679464 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.679489 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.679509 4948 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.696484 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluste
r-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting 
DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.715127 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.729844 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.749304 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-
01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8
aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.761063 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.772660 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.781380 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.781421 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.781454 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.781469 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.781479 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.791138 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.804925 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.817341 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.827012 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.838001 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17
bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:02Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.884003 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.884041 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.884051 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.884064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.884076 
4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.986751 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.986808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.986822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.986846 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:02 crc kubenswrapper[4948]: I0120 19:50:02.986858 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:02Z","lastTransitionTime":"2026-01-20T19:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.021210 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.038835 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.067356 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.083342 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.090152 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.090188 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.090196 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.090209 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.090218 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.102698 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:
50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.124208 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.143039 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.154398 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.164365 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.177247 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.199905 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.199952 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.199969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.199987 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.200003 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.201804 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z 
is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.214795 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.232476 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.250594 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.271164 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.299855 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:03Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.304382 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.304409 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.304419 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.304444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.304456 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.407425 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.407826 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.407988 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.408225 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.408298 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.511194 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.511228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.511238 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.511253 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.511264 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.589975 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 17:46:57.038861737 +0000 UTC Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.613934 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.614000 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.614018 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.614044 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.614063 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.717121 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.717152 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.717160 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.717176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.717185 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.819008 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.819086 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.819108 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.819135 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.819196 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.922145 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.922190 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.922199 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.922213 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:03 crc kubenswrapper[4948]: I0120 19:50:03.922222 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:03Z","lastTransitionTime":"2026-01-20T19:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.024481 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.024520 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.024532 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.024550 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.024562 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.111324 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.112132 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.112395 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.115687 4948 generic.go:334] "Generic (PLEG): container finished" podID="c6c006e4-2994-4ab8-bdfc-90703054f20d" containerID="712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97" exitCode=0 Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.115759 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerDied","Data":"712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.127535 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.127569 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.127585 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.127601 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.127615 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.129354 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.147082 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.157405 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.169690 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.170256 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.174051 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.184581 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.184795 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.184889 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.184975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.185050 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.191240 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"i
mageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":
\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.197236 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae66
9\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.201268 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.201293 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.201302 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.201317 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.201328 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.204235 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.212871 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient 
memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\
\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\
":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.218052 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.218113 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.218124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.218148 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.218172 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.219975 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.233101 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.234805 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.238277 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.238316 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.238325 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.238343 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.238353 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.248463 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.251864 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.261877 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.261914 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.261923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.261937 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.261945 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.293543 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Complet
ed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.304597 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.304750 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.306148 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.306180 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.306189 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.306203 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.306212 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.320855 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.341593 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.356131 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.373017 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.387888 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.400109 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.407946 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.407989 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.408001 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.408017 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.408029 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.510556 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.510614 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.510634 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.510655 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.510670 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.520105 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.537945 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.557952 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.568943 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.569088 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.568965 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.569174 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.568944 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:04 crc kubenswrapper[4948]: E0120 19:50:04.569260 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.571631 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.590318 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 06:31:37.819313235 +0000 UTC Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.590806 4948 
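Every status patch in this excerpt is rejected for the same underlying reason: the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate whose NotAfter (2025-08-24T17:21:41Z) is behind the node clock (2026-01-20T19:50:04Z). A minimal Go sketch of the validity-window test that yields "certificate has expired or is not yet valid" follows; the certificate file path is hypothetical, since the log does not say where the webhook's serving cert is mounted.

    package main

    import (
    	"crypto/x509"
    	"encoding/pem"
    	"fmt"
    	"log"
    	"os"
    	"time"
    )

    func main() {
    	// Hypothetical path for illustration only.
    	pemBytes, err := os.ReadFile("serving.crt")
    	if err != nil {
    		log.Fatal(err)
    	}
    	block, _ := pem.Decode(pemBytes)
    	if block == nil {
    		log.Fatal("no PEM block found")
    	}
    	cert, err := x509.ParseCertificate(block.Bytes)
    	if err != nil {
    		log.Fatal(err)
    	}
    	now := time.Now()
    	// The same window test behind the x509 failures above: in this log,
    	// 2026-01-20T19:50:04Z falls after NotAfter 2025-08-24T17:21:41Z.
    	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
    		fmt.Printf("invalid: now=%s notBefore=%s notAfter=%s\n",
    			now.UTC().Format(time.RFC3339),
    			cert.NotBefore.UTC().Format(time.RFC3339),
    			cert.NotAfter.UTC().Format(time.RFC3339))
    		os.Exit(1)
    	}
    	fmt.Println("certificate is within its validity window")
    }
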
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\
\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.613379 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.613420 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.613429 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.613442 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.613450 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.613862 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a
2de362e27f9ee38366ce6363\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.625065 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.643956 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.661215 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.675368 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.699808 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-
01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8
aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.711496 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.715092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.715120 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.715129 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.715142 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.715150 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.724897 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.735988 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:04Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.818063 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.818116 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.818140 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.818168 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.818193 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.920698 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.920807 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.920825 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.920878 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:04 crc kubenswrapper[4948]: I0120 19:50:04.920898 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:04Z","lastTransitionTime":"2026-01-20T19:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.023964 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.024000 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.024007 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.024020 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.024029 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.122383 4948 generic.go:334] "Generic (PLEG): container finished" podID="c6c006e4-2994-4ab8-bdfc-90703054f20d" containerID="3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea" exitCode=0 Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.122455 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerDied","Data":"3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea"} Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.122511 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.125590 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.125619 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.125630 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.125643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.125653 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.139872 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.157215 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.172016 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.194486 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.211640 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3d
f41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.226989 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.229098 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.229166 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.229210 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.229239 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.229261 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.244017 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49
:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.256531 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.268295 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.331795 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.331831 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.331843 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.331865 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.331876 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.337994 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/
lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"conta
inerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.351008 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.361464 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.370608 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.383199 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.397753 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:05Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.433959 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.433995 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.434004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.434017 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.434027 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.536470 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.536520 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.536531 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.536550 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.536562 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.591374 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 20:52:25.746923228 +0000 UTC Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.639086 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.639119 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.639130 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.639147 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:05 crc kubenswrapper[4948]: I0120 19:50:05.639159 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.742323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.742390 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.742407 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.742431 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.742448 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.844431 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.844452 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.844460 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.844471 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.844479 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.947639 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.947955 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.948018 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.948391 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:05.948461 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:05Z","lastTransitionTime":"2026-01-20T19:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.052863 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.052931 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.052955 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.052981 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.053000 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.130199 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.130883 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" event={"ID":"c6c006e4-2994-4ab8-bdfc-90703054f20d","Type":"ContainerStarted","Data":"1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.154544 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.155649 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.155684 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.155694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.155724 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.155737 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.166455 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.179518 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.257450 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.257488 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.257498 4948 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.257512 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.257523 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.265694 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.284009 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.295242 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.304319 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.316441 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.326167 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.342032 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3d
f41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.355168 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.359125 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.359141 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.359148 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.359161 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.359170 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.367924 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49
:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.377037 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.385779 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.392763    4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.461257    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.461310    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.461320    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.461333    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.461343    4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.563953    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.563977    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.563984    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.563996    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.564005    4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.571423    4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.571517    4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.571821    4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.571867    4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.571902    4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.571935    4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.592655    4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 00:12:55.674164267 +0000 UTC
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.646144    4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"]
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.646829    4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.648604    4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.649221    4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.666927    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.666978    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.666991    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.667009    4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.667022    4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.680454 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.697584 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.713418 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.717832    4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.717983    4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.718135    4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:50:22.718098846 +0000 UTC m=+50.668823865 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.718308    4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.718432    4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.718535    4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.718658    4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.718783    4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.718913    4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk4xw\" (UniqueName: \"kubernetes.io/projected/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-kube-api-access-qk4xw\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.718315    4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719041    4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719070    4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.718386    4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.718844    4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719183    4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719196    4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719152    4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:22.719128254 +0000 UTC m=+50.669853273 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719242    4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:22.719227586 +0000 UTC m=+50.669952555 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719247    4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719254    4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:22.719248457 +0000 UTC m=+50.669973426 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 19:50:06 crc kubenswrapper[4948]: E0120 19:50:06.719313    4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:22.719300158 +0000 UTC m=+50.670025137 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.719490    4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.728655    4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.746100 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.762255 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.769318 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.769361 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.769370 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.769388 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.769398 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.775642 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.788102 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.803656 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.821073 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk4xw\" (UniqueName: \"kubernetes.io/projected/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-kube-api-access-qk4xw\") pod 
\"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.821191 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.821243 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.821306 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.822062 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.822131 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv"
Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.823924 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.827959 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: \"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.887460 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.889497 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.889537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.889549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.889552 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk4xw\" (UniqueName: \"kubernetes.io/projected/f7d2a8aa-40b0-44d5-a210-c72d73b43f94-kube-api-access-qk4xw\") pod \"ovnkube-control-plane-749d76644c-qmlxv\" (UID: 
\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.889565 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.889634 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.905052 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93
476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.918043 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.928530 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.941131 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.963180 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3d
f41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:06Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.971228 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" Jan 20 19:50:06 crc kubenswrapper[4948]: W0120 19:50:06.989308 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7d2a8aa_40b0_44d5_a210_c72d73b43f94.slice/crio-f2ca6c4c3cb6255295ff990ac3444f5bd9c9ccc7d9adbd11177243672e2b71e9 WatchSource:0}: Error finding container f2ca6c4c3cb6255295ff990ac3444f5bd9c9ccc7d9adbd11177243672e2b71e9: Status 404 returned error can't find the container with id f2ca6c4c3cb6255295ff990ac3444f5bd9c9ccc7d9adbd11177243672e2b71e9 Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.991462 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.991493 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.991507 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.991969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:06 crc kubenswrapper[4948]: I0120 19:50:06.991986 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:06Z","lastTransitionTime":"2026-01-20T19:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.093982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.094032 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.094041 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.094060 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.094072 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.134649 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" event={"ID":"f7d2a8aa-40b0-44d5-a210-c72d73b43f94","Type":"ContainerStarted","Data":"f2ca6c4c3cb6255295ff990ac3444f5bd9c9ccc7d9adbd11177243672e2b71e9"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.196282 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.196310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.196323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.196337 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.196347 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.299130 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.299170 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.299181 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.299198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.299209 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.406647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.406748 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.406767 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.406806 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.406824 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.510617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.510687 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.510765 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.510801 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.510825 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.593065 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 10:43:32.088559275 +0000 UTC Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.613801 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.613847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.613862 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.613880 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.613894 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.716118 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.716154 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.716165 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.716182 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.716195 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.818555 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.818615 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.818627 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.818646 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.818659 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.922230 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.922303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.922323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.922347 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:07 crc kubenswrapper[4948]: I0120 19:50:07.922364 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:07Z","lastTransitionTime":"2026-01-20T19:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.026064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.026123 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.026141 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.026164 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.026182 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.128488 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.128545 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.128562 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.128585 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.128602 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.141771 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/0.log" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.146503 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363" exitCode=1 Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.146612 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.148058 4948 scope.go:117] "RemoveContainer" containerID="eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.150350 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" event={"ID":"f7d2a8aa-40b0-44d5-a210-c72d73b43f94","Type":"ContainerStarted","Data":"655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.150887 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" event={"ID":"f7d2a8aa-40b0-44d5-a210-c72d73b43f94","Type":"ContainerStarted","Data":"bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.178554 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.189736 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-h4c6s"] Jan 20 19:50:08 crc 
kubenswrapper[4948]: I0120 19:50:08.190440 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:08 crc kubenswrapper[4948]: E0120 19:50:08.190527 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.203778 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.223860 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.230526 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.230564 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.230578 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.230600 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.230613 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.237012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.237218 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dt6b\" (UniqueName: \"kubernetes.io/projected/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-kube-api-access-5dt6b\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.245368 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containe
rID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb
80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/
kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.264649 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.278996 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.291983 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.308422 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.326776 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.332408 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.332436 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.332446 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.332458 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.332468 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.338268 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dt6b\" (UniqueName: \"kubernetes.io/projected/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-kube-api-access-5dt6b\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.338342 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:08 crc kubenswrapper[4948]: E0120 19:50:08.338451 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 19:50:08 crc kubenswrapper[4948]: E0120 19:50:08.338523 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs podName:dbfcfce6-0ab8-40ba-80b2-d391a7dd5418 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:08.838502912 +0000 UTC m=+36.789227901 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs") pod "network-metrics-daemon-h4c6s" (UID: "dbfcfce6-0ab8-40ba-80b2-d391a7dd5418") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.353371 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnl
y\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:07Z\\\",\\\"message\\\":\\\"flector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:07.563432 6133 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 19:50:07.563474 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 19:50:07.563483 6133 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 19:50:07.563505 6133 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:07.563551 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 19:50:07.563549 6133 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 19:50:07.563570 6133 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 19:50:07.563579 6133 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0120 19:50:07.563604 6133 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 19:50:07.563621 6133 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0120 19:50:07.563646 6133 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:07.563655 6133 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:07.563771 6133 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0120 19:50:07.563784 6133 factory.go:656] Stopping watch factory\\\\nI0120 19:50:07.563805 6133 
ovnkube.go:599] Stopped ovnkube\\\\nI0120 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.359440 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dt6b\" (UniqueName: \"kubernetes.io/projected/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-kube-api-access-5dt6b\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.368822 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.381557 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.393261 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.405549 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.428020 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\
\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.435378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.435411 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.435422 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.435439 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.435449 4948 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.439579 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.451539 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.465957 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.486406 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.518840 4948 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.529769 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.538224 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.538264 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.538275 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.538291 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.538302 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.544671 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.555473 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 
2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.570041 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"pod
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.590923 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:07Z\\\",\\\"message\\\":\\\"flector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:07.563432 6133 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 19:50:07.563474 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 19:50:07.563483 6133 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 19:50:07.563505 6133 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:07.563551 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 19:50:07.563549 6133 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 19:50:07.563570 6133 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 19:50:07.563579 6133 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0120 19:50:07.563604 6133 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 19:50:07.563621 6133 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0120 19:50:07.563646 6133 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:07.563655 6133 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:07.563771 
6133 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0120 19:50:07.563784 6133 factory.go:656] Stopping watch factory\\\\nI0120 19:50:07.563805 6133 ovnkube.go:599] Stopped ovnkube\\\\nI0120 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\"
:[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.601864 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.614686 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.629044 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.640437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.640466 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.640478 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.640494 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.640504 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.641485 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.653997 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.678404 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.692044 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.705665 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:08Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.745244 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.745290 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.745307 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.745331 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.745349 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.804222 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 03:17:32.828729133 +0000 UTC
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.804446 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.804477 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.804497 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:08 crc kubenswrapper[4948]: E0120 19:50:08.804632 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:08 crc kubenswrapper[4948]: E0120 19:50:08.804871 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:08 crc kubenswrapper[4948]: E0120 19:50:08.805067 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.848047 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.848088 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.848102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.848122 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:08 crc kubenswrapper[4948]: I0120 19:50:08.848139 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:08.904955 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:09 crc kubenswrapper[4948]: E0120 19:50:08.905081 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 19:50:09 crc kubenswrapper[4948]: E0120 19:50:08.905139 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs podName:dbfcfce6-0ab8-40ba-80b2-d391a7dd5418 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:09.905121717 +0000 UTC m=+37.855846686 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs") pod "network-metrics-daemon-h4c6s" (UID: "dbfcfce6-0ab8-40ba-80b2-d391a7dd5418") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:08.950232 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:08.950269 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:08.950283 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:08.950306 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:08.950320 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:08Z","lastTransitionTime":"2026-01-20T19:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.053867 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.053917 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.053933 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.053957 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.053973 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:09Z","lastTransitionTime":"2026-01-20T19:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.155802 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/0.log"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.156044 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.156069 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.156078 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.156095 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.156106 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:09Z","lastTransitionTime":"2026-01-20T19:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.158750 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19"}
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.159055 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.175787 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.188657 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.202063 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.215651 4948 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.229884 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.246045 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.268819 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.290621 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.315414 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.323948 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.335207 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.358032 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3d
f41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:07Z\\\",\\\"message\\\":\\\"flector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:07.563432 6133 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 19:50:07.563474 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 19:50:07.563483 6133 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 19:50:07.563505 6133 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:07.563551 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 19:50:07.563549 6133 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 19:50:07.563570 6133 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 19:50:07.563579 6133 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0120 19:50:07.563604 6133 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 19:50:07.563621 6133 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0120 19:50:07.563646 6133 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:07.563655 6133 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:07.563771 6133 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0120 19:50:07.563784 6133 factory.go:656] Stopping watch factory\\\\nI0120 19:50:07.563805 6133 ovnkube.go:599] Stopped ovnkube\\\\nI0120 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.369333 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.380920 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.400975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.401014 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.401026 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.401045 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.401058 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:09Z","lastTransitionTime":"2026-01-20T19:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.420973 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.432471 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.445084 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.504685 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.504748 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.504760 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.504781 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.504791 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:09Z","lastTransitionTime":"2026-01-20T19:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.569063 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:09 crc kubenswrapper[4948]: E0120 19:50:09.569188 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.607026 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.607074 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.607084 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.607096 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.607106 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:09Z","lastTransitionTime":"2026-01-20T19:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.709923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.709969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.709982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.709998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.710008 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:09Z","lastTransitionTime":"2026-01-20T19:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.805297 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 03:59:31.557418843 +0000 UTC Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.812651 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.812737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.812765 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.812798 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.812820 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:09Z","lastTransitionTime":"2026-01-20T19:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.907316 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:09 crc kubenswrapper[4948]: E0120 19:50:09.907498 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 19:50:09 crc kubenswrapper[4948]: E0120 19:50:09.907644 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs podName:dbfcfce6-0ab8-40ba-80b2-d391a7dd5418 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:11.907610381 +0000 UTC m=+39.858335390 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs") pod "network-metrics-daemon-h4c6s" (UID: "dbfcfce6-0ab8-40ba-80b2-d391a7dd5418") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.915696 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.915767 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.915783 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.915805 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:09 crc kubenswrapper[4948]: I0120 19:50:09.915822 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:09Z","lastTransitionTime":"2026-01-20T19:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.019323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.019380 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.019394 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.019421 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.019440 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.121689 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.121783 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.121817 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.121847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.121886 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.163658 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/1.log" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.164527 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/0.log" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.168019 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19" exitCode=1 Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.168048 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.168115 4948 scope.go:117] "RemoveContainer" containerID="eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.170037 4948 scope.go:117] "RemoveContainer" containerID="d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19" Jan 20 19:50:10 crc kubenswrapper[4948]: E0120 19:50:10.170369 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.206287 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:07Z\\\",\\\"message\\\":\\\"flector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:07.563432 6133 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 19:50:07.563474 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 19:50:07.563483 6133 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 19:50:07.563505 6133 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:07.563551 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 19:50:07.563549 6133 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 19:50:07.563570 6133 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 19:50:07.563579 6133 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0120 19:50:07.563604 6133 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 19:50:07.563621 6133 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0120 19:50:07.563646 6133 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:07.563655 6133 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:07.563771 6133 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0120 19:50:07.563784 6133 factory.go:656] Stopping watch factory\\\\nI0120 19:50:07.563805 6133 ovnkube.go:599] Stopped ovnkube\\\\nI0120 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:09Z\\\",\\\"message\\\":\\\"Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped 
already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z]\\\\nI0120 19:50:09.787208 6330 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0120 19:50:09.78\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174
f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.221138 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.224235 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.224266 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.224277 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.224293 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.224304 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.240317 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.254369 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.266679 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.279045 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.293000 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.302731 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc 
kubenswrapper[4948]: I0120 19:50:10.319561 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Co
mpleted\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.326043 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.326084 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.326099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.326114 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.326125 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.331663 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.342163 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.351844 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.360277 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 
19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.370214 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.379894 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.390433 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.403655 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:10Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.428863 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.428925 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.428941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.428964 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.428978 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.532871 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.532937 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.532963 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.532992 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.533015 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.569409 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.569455 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:10 crc kubenswrapper[4948]: E0120 19:50:10.569546 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:10 crc kubenswrapper[4948]: E0120 19:50:10.569700 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.570201 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:10 crc kubenswrapper[4948]: E0120 19:50:10.570271 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.635413 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.635454 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.635466 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.635482 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.635494 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.738227 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.738279 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.738290 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.738307 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.738322 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.806446 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 21:41:10.40367448 +0000 UTC Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.840065 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.840117 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.840130 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.840177 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.840189 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.943485 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.943972 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.944296 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.944545 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:10 crc kubenswrapper[4948]: I0120 19:50:10.944881 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:10Z","lastTransitionTime":"2026-01-20T19:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.047785 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.047843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.047865 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.047892 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.047927 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.149908 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.149953 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.149969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.149999 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.150024 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.172085 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/1.log" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.252841 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.253174 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.253334 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.253481 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.253681 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.356860 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.357177 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.357326 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.357511 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.357636 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.460024 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.460454 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.460591 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.460764 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.460914 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.564048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.564107 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.564124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.564148 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.564168 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.569342 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:11 crc kubenswrapper[4948]: E0120 19:50:11.569518 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.667108 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.667159 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.667174 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.667195 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.667212 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.770371 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.770428 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.770442 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.770462 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.770476 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.806636 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 23:05:02.258325542 +0000 UTC
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.873824 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.873906 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.873923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.873944 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.873961 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.937274 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:11 crc kubenswrapper[4948]: E0120 19:50:11.937539 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 19:50:11 crc kubenswrapper[4948]: E0120 19:50:11.937670 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs podName:dbfcfce6-0ab8-40ba-80b2-d391a7dd5418 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:15.937641771 +0000 UTC m=+43.888366780 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs") pod "network-metrics-daemon-h4c6s" (UID: "dbfcfce6-0ab8-40ba-80b2-d391a7dd5418") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.976404 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.976448 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.976460 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.976476 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:11 crc kubenswrapper[4948]: I0120 19:50:11.976488 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:11Z","lastTransitionTime":"2026-01-20T19:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.078495 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.078764 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.078776 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.078791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.078802 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.211062 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.211142 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.211159 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.211212 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.211227 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.314158 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.314208 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.314263 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.314285 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.314305 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.416887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.416926 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.416934 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.416949 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.416959 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.519842 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.519889 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.519899 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.519915 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.519926 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.569642 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.569772 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:12 crc kubenswrapper[4948]: E0120 19:50:12.569864 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.569885 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:12 crc kubenswrapper[4948]: E0120 19:50:12.570087 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:12 crc kubenswrapper[4948]: E0120 19:50:12.570314 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.585115 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.599387 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.612039 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.623115 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.623149 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.623161 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.623178 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.623189 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.643353 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.672254 4948 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.687992 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.703071 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.717990 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.725136 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.725211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.725236 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.725264 
4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.725307 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.739303 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.765096 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d22
2ff6c52965154d6d1ffc8f19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:07Z\\\",\\\"message\\\":\\\"flector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:07.563432 6133 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 19:50:07.563474 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 19:50:07.563483 6133 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 19:50:07.563505 6133 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:07.563551 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 19:50:07.563549 6133 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 19:50:07.563570 6133 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 19:50:07.563579 6133 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0120 19:50:07.563604 6133 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 19:50:07.563621 6133 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0120 19:50:07.563646 6133 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:07.563655 6133 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:07.563771 6133 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0120 19:50:07.563784 6133 factory.go:656] Stopping watch factory\\\\nI0120 19:50:07.563805 6133 ovnkube.go:599] Stopped ovnkube\\\\nI0120 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:09Z\\\",\\\"message\\\":\\\"Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z]\\\\nI0120 19:50:09.787208 6330 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] 
Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0120 19:50:09.78\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"19
2.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.779044 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.798486 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.806953 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 14:17:49.156350644 +0000 UTC Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.816145 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.828028 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.828110 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.828123 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.828162 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.828186 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.837284 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.857090 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.885406 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.899559 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:12Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.930448 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.930486 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.930497 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.930515 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:12 crc kubenswrapper[4948]: I0120 19:50:12.930528 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:12Z","lastTransitionTime":"2026-01-20T19:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.033855 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.033922 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.033939 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.033964 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.033982 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.137582 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.137629 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.137641 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.137658 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.137672 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.240236 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.240314 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.240328 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.240345 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.240359 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.343697 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.343796 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.343820 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.343847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.343865 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.447639 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.447680 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.447691 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.447729 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.447744 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.551201 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.551300 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.551326 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.551361 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.551390 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.569545 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:13 crc kubenswrapper[4948]: E0120 19:50:13.569841 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.654259 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.654314 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.654327 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.654343 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.654355 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.757051 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.757336 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.757454 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.757596 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.757766 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.807190 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 23:07:11.284081184 +0000 UTC
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.860770 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.860854 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.861250 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.861324 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.861344 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.964280 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.964350 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.964379 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.964409 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:13 crc kubenswrapper[4948]: I0120 19:50:13.964429 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:13Z","lastTransitionTime":"2026-01-20T19:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.067481 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.067530 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.067542 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.067562 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.067575 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.169824 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.169889 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.169905 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.169925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.169941 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.272442 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.272490 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.272506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.272526 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.272542 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.375340 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.375390 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.375404 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.375423 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.375503 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.397643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.397680 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.397687 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.397722 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.397732 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.413757 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:14Z is after 
2025-08-24T17:21:41Z" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.418403 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.418454 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.418467 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.418482 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.418514 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.431602 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.435505 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.435571 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.435599 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.435628 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.435653 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.449157 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.454359 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.454394 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.454405 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.454420 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.454431 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.474152 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.478609 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.478660 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.478677 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.478701 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.478749 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.498219 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:14Z is after 
2025-08-24T17:21:41Z" Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.498383 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.500674 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.500725 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.500737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.500751 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.500762 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.569816 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.569875 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.569890 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.569996 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.570134 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:14 crc kubenswrapper[4948]: E0120 19:50:14.570282 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.603890 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.604003 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.604027 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.604102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.604125 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.707457 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.707507 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.707518 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.707536 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.707549 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.808279 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 19:19:24.328287019 +0000 UTC Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.811035 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.811092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.811110 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.811134 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.811150 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.914199 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.914296 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.914322 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.914352 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:14 crc kubenswrapper[4948]: I0120 19:50:14.914379 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:14Z","lastTransitionTime":"2026-01-20T19:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.017295 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.017360 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.017379 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.017403 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.017422 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.120702 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.120806 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.120828 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.120857 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.120879 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.224305 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.224362 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.224384 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.224412 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.224437 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.327177 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.327280 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.327363 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.327431 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.327457 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.431254 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.431366 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.431391 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.431426 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.431448 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.539923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.539987 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.540005 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.540029 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.540045 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.569759 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:15 crc kubenswrapper[4948]: E0120 19:50:15.569954 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.643476 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.643556 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.643592 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.643621 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.643641 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.747099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.747149 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.747167 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.747189 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.747205 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.809256 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 08:12:01.692470239 +0000 UTC
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.849386 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.849414 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.849422 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.849559 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.849576 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.952809 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.952963 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.952988 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.953013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.953032 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:15Z","lastTransitionTime":"2026-01-20T19:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:15 crc kubenswrapper[4948]: I0120 19:50:15.974575 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:15 crc kubenswrapper[4948]: E0120 19:50:15.974773 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 19:50:15 crc kubenswrapper[4948]: E0120 19:50:15.974836 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs podName:dbfcfce6-0ab8-40ba-80b2-d391a7dd5418 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:23.974819757 +0000 UTC m=+51.925544736 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs") pod "network-metrics-daemon-h4c6s" (UID: "dbfcfce6-0ab8-40ba-80b2-d391a7dd5418") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.055272 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.055338 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.055352 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.055375 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.055398 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.157872 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.157918 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.157929 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.157944 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.157956 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.260643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.260684 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.260695 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.260737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.260751 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.363762 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.363805 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.363815 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.363834 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.363846 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.465977 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.466071 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.466087 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.466104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.466115 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.568576 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.568651 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.568676 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.568752 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.568778 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.569145 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.569163 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:16 crc kubenswrapper[4948]: E0120 19:50:16.569331 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.569469 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:16 crc kubenswrapper[4948]: E0120 19:50:16.569581 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:16 crc kubenswrapper[4948]: E0120 19:50:16.569653 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.671501 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.671576 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.671596 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.671620 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.671638 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.774008 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.774056 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.774071 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.774090 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.774107 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.809619 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 15:25:16.580942263 +0000 UTC Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.877262 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.877311 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.877321 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.877339 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.877352 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.980105 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.980256 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.980281 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.980312 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:16 crc kubenswrapper[4948]: I0120 19:50:16.980335 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:16Z","lastTransitionTime":"2026-01-20T19:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.082847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.082911 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.082930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.082954 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.082971 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.185702 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.185773 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.185786 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.185804 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.185818 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.288630 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.288681 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.288697 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.288745 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.288761 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.391080 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.391140 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.391162 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.391189 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.391210 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.494130 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.494205 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.494214 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.494228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.494236 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.569918 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:17 crc kubenswrapper[4948]: E0120 19:50:17.570079 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.596477 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.596549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.596571 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.596595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.596612 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.699692 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.699788 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.699811 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.699843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.699865 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.802567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.802620 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.802636 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.802657 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.802672 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.809755 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 12:34:55.884955873 +0000 UTC Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.905619 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.905697 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.905765 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.905798 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:17 crc kubenswrapper[4948]: I0120 19:50:17.905819 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:17Z","lastTransitionTime":"2026-01-20T19:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.009072 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.009139 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.009163 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.009192 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.009216 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.112020 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.112089 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.112109 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.112134 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.112152 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.232957 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.233015 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.233034 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.233059 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.233079 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.336753 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.336805 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.336823 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.336845 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.336863 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.438789 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.438851 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.438878 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.438910 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.438934 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.541613 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.541682 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.541730 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.541757 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.541774 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.569261 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.569319 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.569295 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:18 crc kubenswrapper[4948]: E0120 19:50:18.569412 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:18 crc kubenswrapper[4948]: E0120 19:50:18.569499 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:18 crc kubenswrapper[4948]: E0120 19:50:18.569592 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.645457 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.645537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.645564 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.645593 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.645617 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.683876 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.694254 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.713164 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"ima
geID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-d
ev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.732954 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when 
the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.747614 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-
overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.748454 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.748499 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.748510 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.748527 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.748837 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.759355 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.769392 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.780757 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.792741 4948 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.803415 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.810767 4948 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 22:34:29.005227244 +0000 UTC Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.816838 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Di
sabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc 
kubenswrapper[4948]: I0120 19:50:18.825991 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.837817 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.849463 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.850798 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.850825 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.850835 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.850850 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.850861 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.862480 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.871802 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.884364 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.900437 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3d
f41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb2f97456255477c9264980b71052ac5cf79344a2de362e27f9ee38366ce6363\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:07Z\\\",\\\"message\\\":\\\"flector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:07.563432 6133 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0120 19:50:07.563474 6133 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0120 19:50:07.563483 6133 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0120 19:50:07.563505 6133 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:07.563551 6133 handler.go:208] Removed *v1.Node event handler 2\\\\nI0120 19:50:07.563549 6133 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0120 19:50:07.563570 6133 handler.go:208] Removed *v1.Node event handler 7\\\\nI0120 19:50:07.563579 6133 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0120 19:50:07.563604 6133 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0120 19:50:07.563621 6133 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0120 19:50:07.563646 6133 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:07.563655 6133 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:07.563771 6133 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0120 19:50:07.563784 6133 factory.go:656] Stopping watch factory\\\\nI0120 19:50:07.563805 6133 ovnkube.go:599] Stopped ovnkube\\\\nI0120 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:09Z\\\",\\\"message\\\":\\\"Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z]\\\\nI0120 19:50:09.787208 6330 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0120 
19:50:09.78\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.909633 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:18Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.953348 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.953382 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.953392 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.953407 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:18 crc kubenswrapper[4948]: I0120 19:50:18.953417 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:18Z","lastTransitionTime":"2026-01-20T19:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.060881 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.060948 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.060969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.061004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.061023 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.164364 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.164554 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.164581 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.164657 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.164684 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.267649 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.267701 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.267734 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.267757 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.267790 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.371092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.371161 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.371184 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.371213 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.371247 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.473930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.473988 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.474004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.474032 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.474049 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.569559 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:19 crc kubenswrapper[4948]: E0120 19:50:19.570125 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.577575 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.577632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.577649 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.577686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.577736 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.680523 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.680573 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.680586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.680603 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.680614 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.755653 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.756874 4948 scope.go:117] "RemoveContainer" containerID="d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19" Jan 20 19:50:19 crc kubenswrapper[4948]: E0120 19:50:19.757121 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.783513 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.783577 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.783636 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.783659 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.783675 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.798170 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.811214 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 17:37:21.6342902 +0000 UTC Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.814765 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.827776 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.844212 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.857347 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.870997 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.885886 4948 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.885934 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.885943 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.885974 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.885984 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.891636 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.904205 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.920918 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.933916 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib
/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.955206 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d22
2ff6c52965154d6d1ffc8f19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:09Z\\\",\\\"message\\\":\\\"Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z]\\\\nI0120 19:50:09.787208 6330 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0120 19:50:09.78\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.966372 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.979605 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting 
DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.988906 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.988952 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.988964 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.989014 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 
19:50:19.989029 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:19Z","lastTransitionTime":"2026-01-20T19:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:19 crc kubenswrapper[4948]: I0120 19:50:19.992803 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce
0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:19Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.006081 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:20Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.020527 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:20Z is after 2025-08-24T17:21:41Z" Jan 20 
19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.032416 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:20Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.044491 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:20Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.091487 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.091528 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.091537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.091553 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.091563 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.194849 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.194915 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.194933 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.194958 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.194977 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.297643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.297727 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.297745 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.297764 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.297779 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.400799 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.400851 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.400866 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.400889 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.400906 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.504028 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.504113 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.504145 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.504176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.504202 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.570082 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:20 crc kubenswrapper[4948]: E0120 19:50:20.570238 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.570308 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.570341 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:20 crc kubenswrapper[4948]: E0120 19:50:20.570518 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:20 crc kubenswrapper[4948]: E0120 19:50:20.570635 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.606950 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.607009 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.607026 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.607050 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.607069 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.710156 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.710196 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.710205 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.710219 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.710230 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.811764 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 10:26:16.553657326 +0000 UTC Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.813269 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.813316 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.813348 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.813395 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.813412 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.916489 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.916563 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.916581 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.916606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:20 crc kubenswrapper[4948]: I0120 19:50:20.916625 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:20Z","lastTransitionTime":"2026-01-20T19:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.020489 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.020547 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.020563 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.020586 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.020606 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.123219 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.123284 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.123303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.123326 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.123346 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.226270 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.226322 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.226342 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.226365 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.226382 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.328971 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.329035 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.329047 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.329063 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.329075 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.431808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.431901 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.431925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.431958 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.431980 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.535117 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.535181 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.535198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.535222 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.535240 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.569860 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:21 crc kubenswrapper[4948]: E0120 19:50:21.570066 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.637602 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.637670 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.637694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.637763 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.637794 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.739958 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.740064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.740090 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.740122 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.740150 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.812451 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 00:56:20.141890696 +0000 UTC Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.842787 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.842816 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.842826 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.842840 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.842851 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.944853 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.944908 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.944923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.944940 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:21 crc kubenswrapper[4948]: I0120 19:50:21.944952 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:21Z","lastTransitionTime":"2026-01-20T19:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.048026 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.048074 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.048088 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.048107 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.048137 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.150223 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.150273 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.150287 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.150310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.150324 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.252521 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.252577 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.252588 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.252609 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.252622 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.355492 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.355616 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.355632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.355654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.355667 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.458512 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.458583 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.458693 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.458772 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.458795 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.562758 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.562840 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.562870 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.562902 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.562926 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.569677 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.569924 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.569951 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.570039 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.570186 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.570321 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.587844 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.603983 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.616668 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.637850 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.660237 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:09Z\\\",\\\"message\\\":\\\"Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z]\\\\nI0120 19:50:09.787208 6330 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0120 19:50:09.78\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.664502 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.664532 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.664544 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.664559 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.664572 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.673489 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.694501 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.709075 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.721026 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.734405 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.745203 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.745354 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.745396 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745446 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:50:54.745415713 +0000 UTC m=+82.696140692 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745503 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745577 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:54.745555037 +0000 UTC m=+82.696280046 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745592 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745620 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745626 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745637 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745645 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745651 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.745504 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745692 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:54.745682821 +0000 UTC m=+82.696407900 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745739 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed.
No retries permitted until 2026-01-20 19:50:54.745730922 +0000 UTC m=+82.696456031 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.745799 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745915 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:50:22 crc kubenswrapper[4948]: E0120 19:50:22.745972 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:54.745943688 +0000 UTC m=+82.696668737 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.761553 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.766444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.766481 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.766490 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.766506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.766518 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.775015 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.789651 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z" Jan 20 
19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.804966 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.813332 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 16:56:29.921918733 +0000 UTC Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.816446 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.833265 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.847428 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.860323 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:22Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.868537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.868581 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.868592 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.868607 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.868619 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.970736 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.970802 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.970824 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.970856 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:22 crc kubenswrapper[4948]: I0120 19:50:22.970880 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:22Z","lastTransitionTime":"2026-01-20T19:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.074233 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.074288 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.074299 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.074318 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.074330 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.177754 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.177813 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.177822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.177836 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.177845 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.280400 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.280469 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.280485 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.280508 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.280524 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.386607 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.386686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.386715 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.386739 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.386753 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.489684 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.489814 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.489882 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.489909 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.489931 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.569586 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:23 crc kubenswrapper[4948]: E0120 19:50:23.569923 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.593205 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.593279 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.593303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.593332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.593354 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.696564 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.696635 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.696657 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.696682 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.696699 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.799954 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.800024 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.800053 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.800081 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.800103 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.814325 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 12:16:45.478244749 +0000 UTC Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.902818 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.902860 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.902872 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.902888 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:23 crc kubenswrapper[4948]: I0120 19:50:23.902901 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:23Z","lastTransitionTime":"2026-01-20T19:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.005191 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.005632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.005811 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.005927 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.006010 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.060365 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.060641 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.060891 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs podName:dbfcfce6-0ab8-40ba-80b2-d391a7dd5418 nodeName:}" failed. No retries permitted until 2026-01-20 19:50:40.060852992 +0000 UTC m=+68.011578011 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs") pod "network-metrics-daemon-h4c6s" (UID: "dbfcfce6-0ab8-40ba-80b2-d391a7dd5418") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.109662 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.109716 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.109727 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.109741 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.109751 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.212990 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.213029 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.213041 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.213057 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.213070 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
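The 16s durationBeforeRetry above is characteristic of exponential backoff: the volume manager doubles the wait after each failed attempt up to a cap, and 16s is what a 500ms initial delay reaches on the sixth consecutive failure. A minimal sketch of that policy follows; the 500ms initial delay, factor of 2, and 2m2s cap are assumptions chosen to match the pattern in the log, not values read from kubelet source.

package main

import (
	"fmt"
	"time"
)

// backoff tracks a per-operation retry delay, doubling after every
// failure up to a cap -- the pattern behind "durationBeforeRetry 16s"
// in the entry above. Initial delay, factor, and cap are illustrative.
type backoff struct {
	initial, max time.Duration
	current      time.Duration
}

func (b *backoff) next() time.Duration {
	if b.current == 0 {
		b.current = b.initial
		return b.current
	}
	b.current *= 2
	if b.current > b.max {
		b.current = b.max
	}
	return b.current
}

func main() {
	b := &backoff{initial: 500 * time.Millisecond, max: 2*time.Minute + 2*time.Second}
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("failure %d: wait %v before retry\n", attempt, b.next())
	}
	// Prints 500ms, 1s, 2s, 4s, 8s, 16s, 32s, 1m4s -- the sixth
	// failure matches the 16s seen in the log.
}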
Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.316046 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.316312 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.316382 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.316452 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.316516 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.419146 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.419211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.419229 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.419253 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.419269 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.522127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.522211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.522227 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.522253 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.522269 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.569391 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.569396 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.569756 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.569804 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.569910 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.570010 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.624917 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.624967 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.624980 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.624999 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.625012 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.640977 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.641029 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.641046 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.641067 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.641083 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.656306 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:24Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.660483 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.660512 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.660522 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.660537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.660548 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.671614 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:24Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.674609 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.674678 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.674728 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.674761 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.674785 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.687673 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:24Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.691748 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.691819 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.691838 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.691860 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.691877 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.708048 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:24Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.713162 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.713209 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.713226 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.713323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.713340 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.726420 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:24Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:24 crc kubenswrapper[4948]: E0120 19:50:24.726653 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.728201 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.728241 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.728251 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.728266 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.728278 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.814814 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 07:50:10.312753902 +0000 UTC Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.831161 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.831197 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.831209 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.831225 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.831235 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.831235 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.934600 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.934658 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.934676 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.934701 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:24 crc kubenswrapper[4948]: I0120 19:50:24.934748 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:24Z","lastTransitionTime":"2026-01-20T19:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.036359 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.036403 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.036412 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.036424 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.036433 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.139233 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.139546 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.139639 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.139779 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.139919 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.241942 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.241986 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.242001 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.242021 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.242035 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.344332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.344367 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.344374 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.344388 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.344399 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.447080 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.447122 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.447130 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.447143 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.447153 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.548837 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.548891 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.548910 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.548928 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.548941 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.569829 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:25 crc kubenswrapper[4948]: E0120 19:50:25.570218 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
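The two sandbox lines above show why the node stays NotReady: pod sandbox creation is skipped outright because the CRI runtime finds no CNI network config in /etc/kubernetes/cni/net.d/. A minimal Go sketch of that directory check, assuming only that the conf dir named in the log is readable; on a healthy cluster the config is written by the network provider (OVN/Multus here) once it starts:

    // cnicheck.go: look for CNI network configs in the directory the kubelet
    // names in the NetworkPluginNotReady message above.
    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	confDir := "/etc/kubernetes/cni/net.d" // directory taken from the log
    	var found []string
    	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
    		matches, err := filepath.Glob(filepath.Join(confDir, pat))
    		if err != nil {
    			fmt.Fprintln(os.Stderr, "glob:", err)
    			os.Exit(1)
    		}
    		found = append(found, matches...)
    	}
    	if len(found) == 0 {
    		fmt.Printf("no CNI configuration file in %s. Has your network provider started?\n", confDir)
    		return
    	}
    	for _, f := range found {
    		fmt.Println("found:", f)
    	}
    }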
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.650546 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.650591 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.650604 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.650620 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.650630 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.752627 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.752688 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.752731 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.752759 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.752809 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.815270 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 01:07:48.449441296 +0000 UTC
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.855866 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.855928 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.855967 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.855999 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.856026 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.958738 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.958788 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.958803 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.958822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:25 crc kubenswrapper[4948]: I0120 19:50:25.958839 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:25Z","lastTransitionTime":"2026-01-20T19:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.061556 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.061631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.061652 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.061679 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.061700 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.163796 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.163834 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.163845 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.163867 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.163881 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.266370 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.266764 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.266912 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.267074 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.267224 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.369631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.369670 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.369679 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.369692 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.369772 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
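The condition object that setters.go prints on each of the lines above is an ordinary serialized NodeCondition. A minimal Go sketch that decodes one of them and reports why the node is not Ready; the struct below mirrors only the JSON keys visible in the log (the real corev1.NodeCondition uses the same field names):

    // readycond.go: parse a condition payload like the ones logged above.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // nodeCondition covers just the fields present in the logged object.
    type nodeCondition struct {
    	Type               string `json:"type"`
    	Status             string `json:"status"`
    	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
    	LastTransitionTime string `json:"lastTransitionTime"`
    	Reason             string `json:"reason"`
    	Message            string `json:"message"`
    }

    func main() {
    	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
    	var c nodeCondition
    	if err := json.Unmarshal([]byte(raw), &c); err != nil {
    		panic(err)
    	}
    	if c.Type == "Ready" && c.Status != "True" {
    		fmt.Printf("node not ready since %s: reason=%s\nmessage: %s\n",
    			c.LastTransitionTime, c.Reason, c.Message)
    	}
    }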
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.472022 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.472064 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.472078 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.472102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.472115 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.569917 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:26 crc kubenswrapper[4948]: E0120 19:50:26.570053 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.570328 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:26 crc kubenswrapper[4948]: E0120 19:50:26.570387 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.570599 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:26 crc kubenswrapper[4948]: E0120 19:50:26.570656 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.576545 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.576579 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.576589 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.576602 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.576613 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.679024 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.679079 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.679091 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.679110 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.679121 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.782847 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.782937 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.782952 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.782984 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.782999 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.816241 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 20:49:53.666736351 +0000 UTC
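The kubelet-serving certificate itself is still valid until 2026-02-24, yet the rotation deadline logged by certificate_manager.go changes on every attempt (2026-01-01, then 2025-12-29, then 2025-12-23 above), consistent with the manager re-jittering the deadline each pass; since every computed deadline is already in the past, rotation is treated as due immediately and is retried every second. A minimal sketch of that computation, assuming a jitter window of 70-90% of the certificate lifetime and a one-year lifetime; neither number appears in the log, only the expiry date does:

    // rotationjitter.go: recompute a jittered rotation deadline the way a
    // client-go style certificate manager does. The jitter window and the
    // certificate lifetime below are assumptions, not values from the log.
    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // nextRotationDeadline picks a random point between 70% and 90% of the
    // certificate's lifetime; re-running it yields a different deadline each
    // time, which is why the logged deadlines above keep moving.
    func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
    	total := notAfter.Sub(notBefore)
    	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
    	return notBefore.Add(jittered)
    }

    func main() {
    	notAfter, err := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-02-24 05:53:03 +0000 UTC")
    	if err != nil {
    		panic(err)
    	}
    	notBefore := notAfter.AddDate(-1, 0, 0) // assumed one-year lifetime
    	deadline := nextRotationDeadline(notBefore, notAfter)
    	fmt.Println("rotation deadline:", deadline)
    	if deadline.Before(time.Now()) {
    		fmt.Println("deadline already passed: rotation is due now, matching the per-second retries in the log")
    	}
    }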
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.886539 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.886602 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.886627 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.886650 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.886666 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.989689 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.989791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.989815 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.989846 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:26 crc kubenswrapper[4948]: I0120 19:50:26.989868 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:26Z","lastTransitionTime":"2026-01-20T19:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.093025 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.093086 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.093106 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.093136 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.093153 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.195667 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.195737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.195751 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.195768 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.196134 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.299507 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.299896 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.300364 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.300611 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.300865 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.403199 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.403227 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.403237 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.403253 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.403264 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.505108 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.505150 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.505160 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.505174 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.505185 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.569511 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:27 crc kubenswrapper[4948]: E0120 19:50:27.570071 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.607483 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.607544 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.607569 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.607596 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.607615 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.709828 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.709899 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.709929 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.709956 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.709976 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.812807 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.812892 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.812916 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.812946 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.812974 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.816993 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 02:29:14.91799218 +0000 UTC
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.915567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.915616 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.915632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.915654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:27 crc kubenswrapper[4948]: I0120 19:50:27.915668 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:27Z","lastTransitionTime":"2026-01-20T19:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.018474 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.018725 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.018837 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.018920 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.018989 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.121075 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.121306 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.121394 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.121493 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.121639 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.223932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.224002 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.224016 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.224033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.224044 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.326218 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.326254 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.326263 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.326276 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.326287 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.428568 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.428642 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.428665 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.428689 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.428739 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.531343 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.531393 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.531408 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.531429 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.531440 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.569396 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.569481 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.569422 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:28 crc kubenswrapper[4948]: E0120 19:50:28.569639 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:28 crc kubenswrapper[4948]: E0120 19:50:28.569881 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:28 crc kubenswrapper[4948]: E0120 19:50:28.570231 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.634006 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.634090 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.634110 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.634138 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.634152 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.737184 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.737213 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.737221 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.737233 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.737241 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.817855 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 14:15:09.289051042 +0000 UTC Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.839359 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.839664 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.839755 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.839831 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.839900 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.941861 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.941892 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.941900 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.941912 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:28 crc kubenswrapper[4948]: I0120 19:50:28.941921 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:28Z","lastTransitionTime":"2026-01-20T19:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.044768 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.044846 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.044864 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.044890 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.044911 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.146919 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.146963 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.146976 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.146995 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.147008 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.249374 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.249640 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.249736 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.249915 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.249990 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.353298 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.353523 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.353618 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.353693 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.353792 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.456307 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.456644 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.456781 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.456918 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.457067 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.559748 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.559978 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.560043 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.560116 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.560182 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.568869 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:29 crc kubenswrapper[4948]: E0120 19:50:29.569139 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.663090 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.663127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.663137 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.663152 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.663163 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.766369 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.766435 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.766460 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.766490 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.766515 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.818962 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 19:49:33.848460862 +0000 UTC Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.868602 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.868624 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.868631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.868642 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.868650 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.970692 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.970747 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.970758 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.970793 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:29 crc kubenswrapper[4948]: I0120 19:50:29.970803 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:29Z","lastTransitionTime":"2026-01-20T19:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.073166 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.073223 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.073239 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.073260 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.073275 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.175833 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.175915 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.175941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.175974 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.175997 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.278863 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.278904 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.278918 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.278935 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.278945 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.381425 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.381486 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.381498 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.381520 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.381535 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.483723 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.483753 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.483763 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.483777 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.483786 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.569512 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.569550 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:30 crc kubenswrapper[4948]: E0120 19:50:30.569618 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.569733 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:30 crc kubenswrapper[4948]: E0120 19:50:30.569858 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:30 crc kubenswrapper[4948]: E0120 19:50:30.569954 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.585429 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.585470 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.585479 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.585493 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.585504 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.688303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.688341 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.688349 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.688363 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.688373 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.791135 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.791232 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.791246 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.791265 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.791277 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.819759 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 02:34:24.995038748 +0000 UTC Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.894378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.894446 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.894473 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.894504 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.894526 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.996674 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.996739 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.996755 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.996777 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:30 crc kubenswrapper[4948]: I0120 19:50:30.996791 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:30Z","lastTransitionTime":"2026-01-20T19:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.099417 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.099781 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.099814 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.099848 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.099872 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.201808 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.201854 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.201866 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.201882 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.201897 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.304525 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.304772 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.304859 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.304970 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.305047 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.407186 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.407263 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.407282 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.407306 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.407324 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.510585 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.510641 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.510651 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.510665 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.510690 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.569316 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:31 crc kubenswrapper[4948]: E0120 19:50:31.569966 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.570464 4948 scope.go:117] "RemoveContainer" containerID="d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.613661 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.613865 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.613877 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.613893 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.613909 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.716545 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.716637 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.716654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.716674 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.716690 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.818849 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.818880 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.818890 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.818904 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.818914 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.820534 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 21:49:29.211546037 +0000 UTC Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.927911 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.927953 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.927965 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.927981 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:31 crc kubenswrapper[4948]: I0120 19:50:31.927992 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:31Z","lastTransitionTime":"2026-01-20T19:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.029990 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.030022 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.030032 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.030045 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.030053 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.132247 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.132284 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.132295 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.132310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.132323 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.235256 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.235292 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.235300 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.235312 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.235321 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.244780 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/1.log" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.247756 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.248133 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.266693 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.307983 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.322128 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.337430 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.337469 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.337479 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.337495 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.337506 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.339307 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.362501 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4
154ba376c9bae2210977a8f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:09Z\\\",\\\"message\\\":\\\"Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z]\\\\nI0120 19:50:09.787208 6330 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0120 
19:50:09.78\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.375102 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.388979 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.403972 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.418934 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.432457 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.440381 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.440432 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.440441 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.440455 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.440465 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.445532 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.457939 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.469666 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc 
kubenswrapper[4948]: I0120 19:50:32.493892 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Co
mpleted\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.508989 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.545428 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.548094 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.548130 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.548139 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.548156 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.548168 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.563993 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.568930 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.569148 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.569278 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:32 crc kubenswrapper[4948]: E0120 19:50:32.569357 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:32 crc kubenswrapper[4948]: E0120 19:50:32.569785 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:32 crc kubenswrapper[4948]: E0120 19:50:32.569881 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.580467 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.592960 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.603667 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.617463 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-de
v/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.629880 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.648852 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.649914 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.649950 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.649961 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.649978 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.649990 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.660924 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.674819 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.693000 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.709042 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.726905 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3d
f41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:09Z\\\",\\\"message\\\":\\\"Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z]\\\\nI0120 19:50:09.787208 6330 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0120 
19:50:09.78\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.736590 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.748231 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.751505 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.751545 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.751556 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.751573 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.751582 4948 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.762835 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.774132 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.785212 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.802324 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.812926 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.820820 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 04:31:34.323825409 +0000 UTC Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.823274 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\"
:{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:32Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.853837 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.853875 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.853885 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.853900 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.853910 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.955612 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.955643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.955652 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.955665 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:32 crc kubenswrapper[4948]: I0120 19:50:32.955674 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:32Z","lastTransitionTime":"2026-01-20T19:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.058571 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.058606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.058615 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.058628 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.058637 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.161947 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.162001 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.162016 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.162036 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.162051 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.257890 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/2.log" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.258686 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/1.log" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.261492 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" exitCode=1 Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.261534 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4"} Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.261571 4948 scope.go:117] "RemoveContainer" containerID="d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.262301 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:50:33 crc kubenswrapper[4948]: E0120 19:50:33.262596 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.265017 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.265068 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.265084 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.265105 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.265116 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.278476 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.298131 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 
19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.328623 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.352339 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.366967 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.367163 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.367177 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.367185 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.367196 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.367205 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.380876 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.393481 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" 
feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.404000 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.413499 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.425029 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.436476 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.450272 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.469390 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3d
f41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d39c4bceafe4fc123de61eb2e0f9d21df5101d222ff6c52965154d6d1ffc8f19\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:09Z\\\",\\\"message\\\":\\\"Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:09Z is after 2025-08-24T17:21:41Z]\\\\nI0120 19:50:09.787208 6330 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-api/cluster-autoscaler-operator]} name:Service_openshift-machine-api/cluster-autoscaler-operator_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.5.245:443: 10.217.5.245:9192:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {54fbe873-7e6d-475f-a0ad-8dd5f06d850d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI0120 
19:50:09.78\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:32Z\\\",\\\"message\\\":\\\"6499 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:32.696388 6499 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696417 6499 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696442 6499 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696835 6499 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.699839 6499 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 19:50:32.699856 6499 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 19:50:32.699867 6499 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:32.699879 6499 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:32.699912 6499 factory.go:656] Stopping watch factory\\\\nI0120 19:50:32.699936 6499 ovnkube.go:599] Stopped ovnkube\\\\nI0120 19:50:32.699961 6499 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0120 19:50:32.699968 6499 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 19:50:32.699973 6499 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 
19:50:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.470905 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.470930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.470941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.470957 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.470970 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.479412 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.491322 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.511921 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.523848 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.536034 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:33Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.569305 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:33 crc kubenswrapper[4948]: E0120 19:50:33.569428 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.572494 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.572529 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.572541 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.572560 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.572570 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.674547 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.674606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.674624 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.674644 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.674659 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.777253 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.777584 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.777597 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.777647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.777661 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.821620 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 09:24:07.308432786 +0000 UTC
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.879651 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.879725 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.879743 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.879763 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.879780 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.982087 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.982133 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.982143 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.982160 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:33 crc kubenswrapper[4948]: I0120 19:50:33.982172 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:33Z","lastTransitionTime":"2026-01-20T19:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.084544 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.084616 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.084640 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.084679 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.084696 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.186780 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.186814 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.186827 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.186842 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.186853 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.265206 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/2.log"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.268520 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4"
Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.268730 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.281842 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.289226 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.289264 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.289275 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.289291 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.289302 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.293899 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.306050 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.317173 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.326037 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.336929 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.353539 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:32Z\\\",\\\"message\\\":\\\"6499 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:32.696388 6499 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696417 6499 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696442 6499 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696835 6499 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.699839 6499 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 19:50:32.699856 6499 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 19:50:32.699867 6499 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:32.699879 6499 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:32.699912 6499 factory.go:656] Stopping watch factory\\\\nI0120 19:50:32.699936 6499 ovnkube.go:599] Stopped ovnkube\\\\nI0120 19:50:32.699961 6499 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0120 19:50:32.699968 6499 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 19:50:32.699973 6499 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 19:50:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.361425 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.369449 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.386519 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.390811 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.390837 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.390844 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.390856 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.390864 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.399067 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.408846 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.418824 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.429313 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 
19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.440523 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.451112 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.461681 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.473805 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.492150 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.492312 4948 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.492388 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.492481 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.492562 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.569338 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.569365 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.569338 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.569457 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.569552 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.569609 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.594759 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.594801 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.594812 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.594829 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.594839 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.697549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.697590 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.697600 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.697613 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.697622 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.755671 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.755768 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.755794 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.755818 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.755834 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.769564 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.773357 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.773396 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.773408 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.773422 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.773432 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.784161 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.787946 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.787975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.787983 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.787996 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.788005 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.797420 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.800063 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.800094 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
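
Every failed patch in this excerpt shares the same root cause, visible in the error tail: the node.network-node-identity.openshift.io webhook presents a serving certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-20T19:50:34Z, so the TLS client rejects the connection before the patch is ever admitted. Below is a minimal Go sketch of the validity-window test behind that message; it mirrors the standard crypto/x509 check rather than kubelet code, and the NotBefore value is an assumed issue time (only the expiry comes from the log).

    package main

    import (
        "crypto/x509"
        "fmt"
        "time"
    )

    // checkValidity reproduces the standard validity-window test: a verifier
    // rejects a certificate whenever "now" falls outside [NotBefore, NotAfter].
    // The message format matches the crypto/x509 error seen in the log.
    func checkValidity(cert *x509.Certificate, now time.Time) error {
        if now.Before(cert.NotBefore) {
            return fmt.Errorf("x509: certificate has expired or is not yet valid: current time %s is before %s",
                now.UTC().Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
        }
        if now.After(cert.NotAfter) {
            return fmt.Errorf("x509: certificate has expired or is not yet valid: current time %s is after %s",
                now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
        }
        return nil
    }

    func main() {
        cert := &x509.Certificate{
            NotBefore: time.Date(2025, 5, 24, 17, 21, 41, 0, time.UTC), // assumed issue time
            NotAfter:  time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC), // expiry from the log
        }
        now := time.Date(2026, 1, 20, 19, 50, 34, 0, time.UTC) // node clock from the log
        // Prints: x509: certificate has expired or is not yet valid:
        //         current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z
        fmt.Println(checkValidity(cert, now))
    }
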
event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.800105 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.800121 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.800132 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.812287 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.815991 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.816015 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
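
The NodeNotReady condition repeated between the patch attempts is a separate problem from the webhook: the container runtime reports NetworkReady=false because nothing has written a CNI configuration into /etc/kubernetes/cni/net.d/ yet; the "Has your network provider started?" hint points at the network plugin (OVN-Kubernetes, judging by the network-node-identity webhook), which has not installed its config. A simplified stand-in for that readiness probe follows, assuming the usual CNI config file extensions; it is a sketch of the idea, not the runtime's actual code.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // hasCNIConfig is a simplified stand-in for the runtime's network-readiness
    // check: the network is considered ready only once at least one CNI config
    // file (*.conf, *.conflist, *.json are the conventional extensions) exists
    // in the configured directory.
    func hasCNIConfig(dir string) (bool, error) {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return false, err
        }
        for _, e := range entries {
            if e.IsDir() {
                continue
            }
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                return true, nil
            }
        }
        return false, nil
    }

    func main() {
        ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
        if err != nil || !ok {
            // Mirrors the condition message the kubelet keeps setting above.
            fmt.Println("container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady")
            return
        }
        fmt.Println("NetworkReady=true")
    }
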
event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.816023 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.816036 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.816046 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.821877 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 08:52:15.27573842 +0000 UTC Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.826274 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:34Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:34 crc kubenswrapper[4948]: E0120 19:50:34.826426 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.831272 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
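
The terminal "exceeds retry count" line explains the shape of this excerpt: the kubelet bounds each node-status sync to a fixed number of patch attempts (upstream kubelet uses nodeStatusUpdateRetry = 5, matching the five failed attempts above, the first of which begins before this excerpt) and then gives up until the next sync period. A sketch of that bounded loop, with a stand-in patch function; the loop structure is mine, only the constant and the two error strings come from the kubelet.

    package main

    import (
        "errors"
        "fmt"
    )

    // nodeStatusUpdateRetry bounds the attempts per sync, matching the five
    // consecutive failures recorded above.
    const nodeStatusUpdateRetry = 5

    // updateNodeStatus retries a caller-supplied patch and gives up with the
    // same terminal error the kubelet logs once the budget is exhausted.
    func updateNodeStatus(tryPatch func() error) error {
        for i := 0; i < nodeStatusUpdateRetry; i++ {
            err := tryPatch()
            if err == nil {
                return nil
            }
            fmt.Printf("Error updating node status, will retry: %v\n", err)
        }
        return errors.New("update node status exceeds retry count")
    }

    func main() {
        webhookErr := errors.New(`failed calling webhook "node.network-node-identity.openshift.io": certificate has expired`)
        // Every attempt fails identically, so the loop exhausts its budget and
        // prints the terminal error seen in the log.
        fmt.Println(updateNodeStatus(func() error { return webhookErr }))
    }
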
event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.831310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.831321 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.831335 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.831346 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.933517 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.933563 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.933573 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.933584 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:34 crc kubenswrapper[4948]: I0120 19:50:34.933592 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:34Z","lastTransitionTime":"2026-01-20T19:50:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.036392 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.036432 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.036444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.036503 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.036525 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:35Z","lastTransitionTime":"2026-01-20T19:50:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.138945 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.138973 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.138996 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.139010 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.139019 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:35Z","lastTransitionTime":"2026-01-20T19:50:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.241332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.241398 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.241408 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.241420 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.241428 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:35Z","lastTransitionTime":"2026-01-20T19:50:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.344158 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.344318 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.344342 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.344398 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.344416 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:35Z","lastTransitionTime":"2026-01-20T19:50:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.569626 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:35 crc kubenswrapper[4948]: E0120 19:50:35.569782 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:35 crc kubenswrapper[4948]: I0120 19:50:35.822125 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 19:15:12.405008151 +0000 UTC
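Note how the rotation deadline changes on every certificate_manager.go line while the expiration stays fixed: client-go's certificate manager re-draws a jittered deadline, uniformly within roughly the 70-90% band of the certificate's validity window, each time its rotation loop runs, and since every drawn deadline here is already in the past, rotation is overdue and keeps being re-attempted. A rough sketch of that computation, assuming the 0.7-0.9 jitter band used by client-go's jitteryDuration and an illustrative notBefore (the log only shows the expiration):

```python
# Sketch of the jittered rotation deadline:
#   deadline = notBefore + U[0.7, 0.9] * (notAfter - notBefore)
# notAfter is taken from the log; notBefore is an assumed placeholder,
# and the 0.7/0.9 constants are from client-go's certificate manager.
import random
from datetime import datetime, timezone

not_after = datetime(2026, 2, 24, 5, 53, 3, tzinfo=timezone.utc)  # from log
not_before = datetime(2025, 4, 30, tzinfo=timezone.utc)           # assumption

total = not_after - not_before
deadline = not_before + total * random.uniform(0.7, 0.9)
print("rotation deadline:", deadline)
```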
Jan 20 19:50:36 crc kubenswrapper[4948]: I0120 19:50:36.569367 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:36 crc kubenswrapper[4948]: I0120 19:50:36.569423 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:36 crc kubenswrapper[4948]: E0120 19:50:36.569539 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:36 crc kubenswrapper[4948]: I0120 19:50:36.569612 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:36 crc kubenswrapper[4948]: E0120 19:50:36.569749 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:36 crc kubenswrapper[4948]: E0120 19:50:36.569936 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:36 crc kubenswrapper[4948]: I0120 19:50:36.822245 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 22:08:23.736526717 +0000 UTC
Jan 20 19:50:37 crc kubenswrapper[4948]: I0120 19:50:37.569490 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:37 crc kubenswrapper[4948]: E0120 19:50:37.569604 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:37 crc kubenswrapper[4948]: I0120 19:50:37.822828 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 02:54:24.482773062 +0000 UTC
Jan 20 19:50:38 crc kubenswrapper[4948]: I0120 19:50:38.569904 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:38 crc kubenswrapper[4948]: E0120 19:50:38.570028 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:38 crc kubenswrapper[4948]: I0120 19:50:38.570201 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:38 crc kubenswrapper[4948]: E0120 19:50:38.570246 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:38 crc kubenswrapper[4948]: I0120 19:50:38.570437 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:38 crc kubenswrapper[4948]: E0120 19:50:38.570493 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:38 crc kubenswrapper[4948]: I0120 19:50:38.823452 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 16:47:41.791809578 +0000 UTC
Has your network provider started?"} Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.554072 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.554116 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.554127 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.554144 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.554156 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:39Z","lastTransitionTime":"2026-01-20T19:50:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.569717 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:39 crc kubenswrapper[4948]: E0120 19:50:39.569860 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.657742 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.657806 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.657820 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.657845 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.657860 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:39Z","lastTransitionTime":"2026-01-20T19:50:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.760785 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.760827 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.760838 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.760853 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.760863 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:39Z","lastTransitionTime":"2026-01-20T19:50:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.823556 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 08:12:52.50011942 +0000 UTC Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.863730 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.863779 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.863789 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.863807 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.863819 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:39Z","lastTransitionTime":"2026-01-20T19:50:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.966459 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.966508 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.966518 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.966535 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:39 crc kubenswrapper[4948]: I0120 19:50:39.966546 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:39Z","lastTransitionTime":"2026-01-20T19:50:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.068851 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.068903 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.068917 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.068935 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.068946 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.140881 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:40 crc kubenswrapper[4948]: E0120 19:50:40.141020 4948 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 19:50:40 crc kubenswrapper[4948]: E0120 19:50:40.141086 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs podName:dbfcfce6-0ab8-40ba-80b2-d391a7dd5418 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.141069807 +0000 UTC m=+100.091794776 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs") pod "network-metrics-daemon-h4c6s" (UID: "dbfcfce6-0ab8-40ba-80b2-d391a7dd5418") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.171812 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.171851 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.171863 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.171879 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.171890 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.273458 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.273499 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.273507 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.273522 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.273535 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.375862 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.375907 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.375916 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.375934 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.375943 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.478302 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.478366 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.478383 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.478410 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.478426 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.568990 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.569055 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.569147 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:40 crc kubenswrapper[4948]: E0120 19:50:40.569140 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:40 crc kubenswrapper[4948]: E0120 19:50:40.569289 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:40 crc kubenswrapper[4948]: E0120 19:50:40.569340 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.580475 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.580520 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.580532 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.580549 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.580563 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.682953 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.683002 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.683015 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.683036 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.683050 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.785039 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.785093 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.785104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.785118 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.785128 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.824228 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 17:09:54.859975022 +0000 UTC Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.887642 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.887733 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.887752 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.887780 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.887792 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.990686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.990801 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.990832 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.990862 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:40 crc kubenswrapper[4948]: I0120 19:50:40.990886 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:40Z","lastTransitionTime":"2026-01-20T19:50:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.094170 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.094222 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.094234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.094251 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.094262 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.197040 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.197102 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.197121 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.197149 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.197168 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.299883 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.299931 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.299943 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.299960 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.299972 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.402408 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.402450 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.402462 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.402479 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.402489 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.504361 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.504629 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.504804 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.504916 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.505013 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.569296 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:41 crc kubenswrapper[4948]: E0120 19:50:41.569459 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.606873 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.606905 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.606914 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.606929 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.606940 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.708686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.708740 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.708753 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.708770 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.708780 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.811137 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.811172 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.811181 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.811196 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.811206 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.824949 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 17:24:47.334209716 +0000 UTC Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.913736 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.913958 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.914213 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.914291 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:41 crc kubenswrapper[4948]: I0120 19:50:41.914348 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:41Z","lastTransitionTime":"2026-01-20T19:50:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.016923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.017242 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.017317 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.017398 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.017462 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.122981 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.123230 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.123340 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.123471 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.123588 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.226197 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.226262 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.226280 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.226304 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.226324 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.328265 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.328301 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.328316 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.328348 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.328362 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.431171 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.431212 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.431225 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.431240 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.431252 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.533964 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.534025 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.534036 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.534052 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.534062 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.569145 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:42 crc kubenswrapper[4948]: E0120 19:50:42.569349 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.569371 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:42 crc kubenswrapper[4948]: E0120 19:50:42.569521 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.569176 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:42 crc kubenswrapper[4948]: E0120 19:50:42.569832 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.624591 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.636059 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.636567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.636594 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.636604 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.636618 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.636628 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.648873 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.660975 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.671628 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.684035 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.692925 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.706573 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.724689 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3d
f41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:32Z\\\",\\\"message\\\":\\\"6499 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:32.696388 6499 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696417 6499 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696442 6499 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696835 6499 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.699839 6499 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 19:50:32.699856 6499 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 19:50:32.699867 6499 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:32.699879 6499 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:32.699912 6499 factory.go:656] Stopping watch factory\\\\nI0120 19:50:32.699936 6499 ovnkube.go:599] Stopped ovnkube\\\\nI0120 19:50:32.699961 6499 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0120 19:50:32.699968 6499 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 19:50:32.699973 6499 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 
19:50:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.735217 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.738239 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.738259 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.738268 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.738280 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.738289 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.748900 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.761342 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.773051 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.785104 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.796907 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.820779 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.825429 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 09:00:30.03394758 +0000 UTC Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.833854 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.840595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.840638 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.840649 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.840667 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.840677 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.846957 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:42Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.941992 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 
19:50:42.942255 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.942417 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.942520 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:42 crc kubenswrapper[4948]: I0120 19:50:42.942599 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:42Z","lastTransitionTime":"2026-01-20T19:50:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:43 crc kubenswrapper[4948]: I0120 19:50:43.569318 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:43 crc kubenswrapper[4948]: E0120 19:50:43.569470 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:43 crc kubenswrapper[4948]: I0120 19:50:43.825579 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 20:59:55.834106453 +0000 UTC
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.569837 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.569867 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.569851 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.569978 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.570076 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.570206 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.784953 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.784992 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.785003 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.785018 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.785041 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:44Z","lastTransitionTime":"2026-01-20T19:50:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.826288 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 10:25:48.037325962 +0000 UTC Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.831963 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.832011 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.832023 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.832043 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.832056 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:44Z","lastTransitionTime":"2026-01-20T19:50:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.848384 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:44Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.852871 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.852913 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.852926 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.852943 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.852956 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:44Z","lastTransitionTime":"2026-01-20T19:50:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.868925 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:44Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.873951 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.874009 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.874029 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.874062 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.874081 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:44Z","lastTransitionTime":"2026-01-20T19:50:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.892797 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ ... node status payload identical to the 19:50:44.868925 attempt above ... }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:44Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.896949 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.897004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
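
Every patch attempt above fails for the same reason: the API server cannot complete the TLS handshake with the node.network-node-identity.openshift.io webhook backend at https://127.0.0.1:9743, because the serving certificate expired on 2025-08-24, long before the node's current clock of 2026-01-20. A minimal Go sketch of a diagnostic for this class of failure, run from the node itself; it dials the endpoint named in the log with verification disabled (the only way to inspect an expired certificate) and prints the validity window:

    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        // Endpoint taken from the webhook error above; InsecureSkipVerify lets us
        // retrieve the certificate that normal verification rejects.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        cert := conn.ConnectionState().PeerCertificates[0]
        fmt.Printf("subject=%s notBefore=%s notAfter=%s expiredNow=%t\n",
            cert.Subject, cert.NotBefore.Format(time.RFC3339),
            cert.NotAfter.Format(time.RFC3339), time.Now().After(cert.NotAfter))
    }

For this log the output would show notAfter=2025-08-24T17:21:41Z and expiredNow=true; rotating the webhook's serving certificate (or correcting a skewed node clock) clears the error.
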
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.897018 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.897035 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.897048 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:44Z","lastTransitionTime":"2026-01-20T19:50:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.913631 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ ... node status payload identical to the 19:50:44.868925 attempt above ... }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:44Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.918048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.918088 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
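
The payload the kubelet keeps retrying is a strategic merge patch over node.status (the $setElementOrder/conditions directive marks it as such): the four conditions MemoryPressure, DiskPressure, PIDPressure and Ready, the allocatable/capacity figures, and the node's full image list. A short client-go sketch that reads the same conditions back from the API server, showing what the last successful update actually recorded; the kubeconfig path is an assumption and should point at credentials able to read the crc node object:

    package main

    import (
        "context"
        "fmt"
        "log"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig") // assumed path
        if err != nil {
            log.Fatal(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            log.Fatal(err)
        }
        node, err := cs.CoreV1().Nodes().Get(context.TODO(), "crc", metav1.GetOptions{})
        if err != nil {
            log.Fatal(err)
        }
        // Server-side view of the conditions the kubelet is failing to patch.
        for _, c := range node.Status.Conditions {
            fmt.Printf("%-16s %-6s %s: %s\n", c.Type, c.Status, c.Reason, c.Message)
        }
    }

While the webhook rejects every patch, the lastHeartbeatTime values returned here will lag the timestamps in this log.
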
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.918104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.918125 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.918140 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:44Z","lastTransitionTime":"2026-01-20T19:50:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.932015 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{ ... node status payload identical to the 19:50:44.868925 attempt above ... }\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:44Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:44 crc kubenswrapper[4948]: E0120 19:50:44.932182 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.933887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
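
"Unable to update node status" marks the point where the kubelet exhausts its per-sync retry budget: it attempts the status update a fixed number of times (nodeStatusUpdateRetry, 5 in the upstream kubelet sources) before giving up until the next sync interval. A sketch of that loop shape, not the kubelet's actual code:

    package main

    import (
        "fmt"
        "log"
    )

    const nodeStatusUpdateRetry = 5 // kubelet's cap on attempts per sync

    func updateNodeStatus(try func(int) error) error {
        for i := 0; i < nodeStatusUpdateRetry; i++ {
            if err := try(i); err != nil {
                log.Printf("Error updating node status, will retry: %v", err)
                continue
            }
            return nil
        }
        return fmt.Errorf("update node status exceeds retry count")
    }

    func main() {
        // Simulate the failure mode above: every attempt hits the same TLS error,
        // so the loop can only end in the retry-count error seen in the log.
        err := updateNodeStatus(func(int) error {
            return fmt.Errorf("failed calling webhook: x509: certificate has expired or is not yet valid")
        })
        fmt.Println(err)
    }

Because the sync loop simply runs again, the very next lines show the same event recording starting over at 19:50:44.933887.
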
Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.933917 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.933928 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.933947 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:44 crc kubenswrapper[4948]: I0120 19:50:44.933959 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:44Z","lastTransitionTime":"2026-01-20T19:50:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.037437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.037500 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.037513 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.037529 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.037541 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.140775 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.140838 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.140851 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.140887 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.140902 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
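
Separately from the webhook failure, the node stays NotReady because the kubelet finds no CNI configuration in /etc/kubernetes/cni/net.d/; on this cluster that file is written by the network plugin pods (the multus pod restarting below among them) once they come up. A minimal Go sketch of the same readiness check, assuming the conventional CNI config extensions (.conf, .conflist, .json):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // Directory named in the repeated KubeletNotReady message above.
        dir := "/etc/kubernetes/cni/net.d"
        var found []string
        for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
            m, _ := filepath.Glob(filepath.Join(dir, pat)) // error only on a bad pattern
            found = append(found, m...)
        }
        if len(found) == 0 {
            fmt.Printf("no CNI configuration file in %s/; network plugin has not written its config yet\n", dir)
            return
        }
        for _, f := range found {
            fmt.Println(f)
        }
    }

Once the network plugin writes a config file there, the NetworkReady condition flips and the node's Ready condition follows.
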
Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.243121 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.243180 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.243198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.243223 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.243240 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.305086 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/0.log" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.305151 4948 generic.go:334] "Generic (PLEG): container finished" podID="e21ac8a2-1e79-4191-b809-75085d432b31" containerID="9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36" exitCode=1 Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.305189 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qttfm" event={"ID":"e21ac8a2-1e79-4191-b809-75085d432b31","Type":"ContainerDied","Data":"9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.305628 4948 scope.go:117] "RemoveContainer" containerID="9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.320812 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.333303 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.346976 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.347016 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.347028 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.347042 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.347051 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.347377 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.360527 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 
2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.378490 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"2026-01-20T19:49:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dff03a58-8f19-44f0-9c67-da652220c449\\\\n2026-01-20T19:49:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dff03a58-8f19-44f0-9c67-da652220c449 to /host/opt/cni/bin/\\\\n2026-01-20T19:49:59Z [verbose] multus-daemon started\\\\n2026-01-20T19:49:59Z [verbose] Readiness Indicator file check\\\\n2026-01-20T19:50:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.398148 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:32Z\\\",\\\"message\\\":\\\"6499 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:32.696388 6499 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696417 6499 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696442 6499 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696835 6499 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.699839 6499 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 19:50:32.699856 6499 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 19:50:32.699867 6499 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:32.699879 6499 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:32.699912 6499 factory.go:656] Stopping watch factory\\\\nI0120 19:50:32.699936 6499 ovnkube.go:599] Stopped ovnkube\\\\nI0120 19:50:32.699961 6499 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0120 19:50:32.699968 6499 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 19:50:32.699973 6499 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 19:50:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.408144 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.423031 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting 
DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.436051 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.448341 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.449238 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.449273 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.449285 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.449304 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 
19:50:45.449315 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.460241 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.470732 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.482166 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.501096 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10
afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.513959 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.525532 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.537742 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.551043 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.551073 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.551082 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.551094 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.551102 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.551587 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:45Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.569878 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:45 crc kubenswrapper[4948]: E0120 19:50:45.569986 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.653929 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.653969 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.653985 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.654006 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.654022 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.756566 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.756596 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.756604 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.756616 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.756623 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.826634 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 16:46:57.46733631 +0000 UTC Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.859874 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.859920 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.859930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.859950 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.859961 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.963511 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.963562 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.963601 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.963667 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:45 crc kubenswrapper[4948]: I0120 19:50:45.963741 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:45Z","lastTransitionTime":"2026-01-20T19:50:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.066567 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.066621 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.066635 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.066653 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.066664 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.169409 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.169464 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.169482 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.169509 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.169531 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.276894 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.276939 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.276952 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.276982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.276997 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.311277 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/0.log" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.311327 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qttfm" event={"ID":"e21ac8a2-1e79-4191-b809-75085d432b31","Type":"ContainerStarted","Data":"b41d2a53810cfb4c072af0d88429759b11509193add1fb0f10d77de4d747b8b4"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.331689 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.354385 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.366413 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.377338 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.379211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.379241 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.379251 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.379267 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.379278 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.387479 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.398485 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.409978 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with 
unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.419035 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.431218 4948 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.440116 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.450454 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b41d2a53810cfb4c072af0d88429759b11509193add1fb0f10d77de4d747b8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"2026-01-20T19:49:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dff03a58-8f19-44f0-9c67-da652220c449\\\\n2026-01-20T19:49:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dff03a58-8f19-44f0-9c67-da652220c449 to /host/opt/cni/bin/\\\\n2026-01-20T19:49:59Z [verbose] multus-daemon started\\\\n2026-01-20T19:49:59Z [verbose] Readiness Indicator file check\\\\n2026-01-20T19:50:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.470197 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:32Z\\\",\\\"message\\\":\\\"6499 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:32.696388 6499 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696417 6499 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696442 6499 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696835 6499 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.699839 6499 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 19:50:32.699856 6499 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 19:50:32.699867 6499 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:32.699879 6499 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:32.699912 6499 factory.go:656] Stopping watch factory\\\\nI0120 19:50:32.699936 6499 ovnkube.go:599] Stopped ovnkube\\\\nI0120 19:50:32.699961 6499 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0120 19:50:32.699968 6499 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 19:50:32.699973 6499 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 19:50:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.479131 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.481647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.481670 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.481679 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.481722 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.481732 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.492810 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.504555 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.516075 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.526486 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.536463 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:46Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.569875 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:46 crc kubenswrapper[4948]: E0120 19:50:46.569987 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.570145 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:46 crc kubenswrapper[4948]: E0120 19:50:46.570187 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.570789 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:50:46 crc kubenswrapper[4948]: E0120 19:50:46.570914 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.571042 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:46 crc kubenswrapper[4948]: E0120 19:50:46.571090 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.688575 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.688614 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.688625 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.688640 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.688652 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.790453 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.790487 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.790496 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.790513 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.790523 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.827648 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 02:25:26.502122968 +0000 UTC Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.893737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.893826 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.893859 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.893891 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.893913 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.996320 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.996347 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.996354 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.996369 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:46 crc kubenswrapper[4948]: I0120 19:50:46.996398 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:46Z","lastTransitionTime":"2026-01-20T19:50:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.100155 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.100264 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.100285 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.100343 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.100390 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.203180 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.203299 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.203323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.203352 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.203372 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.305945 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.305996 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.306010 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.306030 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.306046 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.409336 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.409440 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.409461 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.409486 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.409543 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.512347 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.512396 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.512412 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.512436 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.512454 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.569529 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:47 crc kubenswrapper[4948]: E0120 19:50:47.569741 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.615076 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.615137 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.615156 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.615179 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.615196 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.718289 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.718436 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.718466 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.718495 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.718513 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.822602 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.822655 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.822673 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.822696 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.822780 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.828242 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 21:16:44.076763006 +0000 UTC Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.925591 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.925655 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.925675 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.925763 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:47 crc kubenswrapper[4948]: I0120 19:50:47.925789 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:47Z","lastTransitionTime":"2026-01-20T19:50:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.029021 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.029082 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.029103 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.029131 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.029151 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.132300 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.132346 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.132354 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.132368 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.132378 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.234494 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.234531 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.234540 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.234554 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.234562 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.337176 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.337269 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.337293 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.337365 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.337389 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.440866 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.440928 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.440951 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.440981 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.441002 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.613227 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:48 crc kubenswrapper[4948]: E0120 19:50:48.613918 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.614082 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:48 crc kubenswrapper[4948]: E0120 19:50:48.614223 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.614342 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:48 crc kubenswrapper[4948]: E0120 19:50:48.614478 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.617595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.617730 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.617802 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.617895 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.617983 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.781043 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.781094 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.781106 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.781131 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.781143 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.829330 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 16:09:59.406683045 +0000 UTC
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.883839 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.883908 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.883925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.883949 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.883966 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.986645 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.986702 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.986751 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.986779 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:48 crc kubenswrapper[4948]: I0120 19:50:48.986803 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:48Z","lastTransitionTime":"2026-01-20T19:50:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.089482 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.089557 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.089583 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.089612 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.089635 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.192965 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.193037 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.193050 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.193090 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.193104 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.296332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.296406 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.296428 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.296455 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.296475 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.399860 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.399904 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.399918 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.399935 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.399946 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.503562 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.503660 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.503686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.503756 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.503782 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.569843 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:49 crc kubenswrapper[4948]: E0120 19:50:49.570050 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.606428 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.606481 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.606505 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.606534 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.606556 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.709801 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.709893 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.709911 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.709932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.709948 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.813170 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.813231 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.813248 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.813271 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.813289 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.830428 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 09:32:06.565101359 +0000 UTC
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.916964 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.917029 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.917051 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.917077 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:49 crc kubenswrapper[4948]: I0120 19:50:49.917094 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:49Z","lastTransitionTime":"2026-01-20T19:50:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.379888 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.379925 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.379976 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.380026 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.380038 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:50Z","lastTransitionTime":"2026-01-20T19:50:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.483146 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.483211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.483234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.483264 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.483286 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:50Z","lastTransitionTime":"2026-01-20T19:50:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.569649 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.569823 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:50 crc kubenswrapper[4948]: E0120 19:50:50.569851 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.569909 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:50 crc kubenswrapper[4948]: E0120 19:50:50.570051 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:50 crc kubenswrapper[4948]: E0120 19:50:50.570098 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.586576 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.586607 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.586616 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.586633 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.586643 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:50Z","lastTransitionTime":"2026-01-20T19:50:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.689844 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.689888 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.689902 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.689920 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.689933 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:50Z","lastTransitionTime":"2026-01-20T19:50:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.794157 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.794211 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.794228 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.794252 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.794270 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:50Z","lastTransitionTime":"2026-01-20T19:50:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.831312 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 07:56:06.012263583 +0000 UTC
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.897607 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.897680 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.897734 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.897766 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:50 crc kubenswrapper[4948]: I0120 19:50:50.897788 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:50Z","lastTransitionTime":"2026-01-20T19:50:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:50.999976 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.000038 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.000062 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.000089 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.000115 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.103430 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.103478 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.103490 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.103508 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.103521 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.205672 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.205724 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.205735 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.205749 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.205759 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.307674 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.307729 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.307738 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.307752 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.307762 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.503989 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.504034 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.504045 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.504062 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.504074 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.569217 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:51 crc kubenswrapper[4948]: E0120 19:50:51.569396 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.607217 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.607277 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.607294 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.607320 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.607342 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.710504 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.710547 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.710559 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.710576 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.710589 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.813595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.813888 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.813971 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.814057 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.814175 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.832109 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 19:38:37.746385284 +0000 UTC
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.916678 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.917010 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.917196 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.917370 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:51 crc kubenswrapper[4948]: I0120 19:50:51.917518 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:51Z","lastTransitionTime":"2026-01-20T19:50:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.020618 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.020676 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.020699 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.020765 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.020787 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.125873 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.126487 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.126788 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.127095 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.127391 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.231112 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.231203 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.231226 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.231250 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.231267 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.334002 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.334060 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.334077 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.334099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.334116 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.438037 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.438117 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.438136 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.438160 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.438181 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.540833 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.540874 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.540900 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.540917 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.540926 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.569830 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:52 crc kubenswrapper[4948]: E0120 19:50:52.569965 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.569830 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.569830 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:52 crc kubenswrapper[4948]: E0120 19:50:52.570028 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:52 crc kubenswrapper[4948]: E0120 19:50:52.570117 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.586905 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5ae89016a1d753ccd5c226cb02ff2334fd5b6505f6a6b814b0046e06342076f9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ks7vm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xg4hv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.605080 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c006e4-2994-4ab8-bdfc-90703054f20d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1262541d6ca4703456cbbe79bc6ed49a0dd411f1546e4bdf225c891abb891bec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://29b772748436ab97c1e674e13ec2a1166076ba60d272cd9a659aec5a7ca87130\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5a2c17bc0c668a9332c673c490e62f6e80a5509bd00bfe4b5b31b84cc3f7f44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19ccf078599f7931bb9c9f901967208cb6a25ef2831c4a44eea3ef983f2cf5e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:59Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c295d6e9cae1afd81f43a3733bb80baca0a8cca424251dc4ae2c6873f92620e7\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-20T19:50:00Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://712c38776bff1ce99ca576e68ead7fa95e87731f29e3a5e842ae4ed571116b97\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3212be5d89ac4d4cf7c0eb8ed4f1a20a749d03ca69426cdfb26828351772c9ea\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:50:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4q6jt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-ms8h8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.618557 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01cbd06e3d3a6fcd3fa26ae05e5f3ccca62370b097a3256ca5b835609680342d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.631249 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.645253 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.645323 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.645351 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.645397 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.645423 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.649815 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3efec1f-83f2-4e8a-9685-7ed3a6a7f45a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10745165aa51fc3cde1b1e6e0e13ee157bb0bdb0c7dd33e3ec9d2bb1b62f2071\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb26ea0aea98a51f67d866118395ce7c05be4cf399cd7748e484379e04bcbf97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58aae6b3810e49cde2418fbcd684e7695d08911807fd931dab05d4d690149455\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b9bc65a155de0d33705cee7b866647c293eab75a33646c7033fd85af42b1ddf6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.672918 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:56Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61afc71672c21643a4922b7d3d1bd96fc4377eecd7f06a802b6b395f591e403b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.686572 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-tx5bt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d2ed1457-1153-41b5-8cbc-56599eeecba5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c6eec5473fd7d5931d2897b0a89fb71e71ae29524fb0eddcb7c57c359e415430\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d4wlr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-tx5bt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.706030 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-qttfm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e21ac8a2-1e79-4191-b809-75085d432b31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b41d2a53810cfb4c072af0d88429759b11509193add1fb0f10d77de4d747b8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:44Z\\\",\\\"message\\\":\\\"2026-01-20T19:49:59+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_dff03a58-8f19-44f0-9c67-da652220c449\\\\n2026-01-20T19:49:59+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_dff03a58-8f19-44f0-9c67-da652220c449 to /host/opt/cni/bin/\\\\n2026-01-20T19:49:59Z [verbose] multus-daemon started\\\\n2026-01-20T19:49:59Z [verbose] Readiness Indicator file check\\\\n2026-01-20T19:50:44Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-prr4t\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-qttfm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.729158 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-20T19:50:32Z\\\",\\\"message\\\":\\\"6499 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0120 19:50:32.696388 6499 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696417 6499 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696442 6499 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.696835 6499 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0120 19:50:32.699839 6499 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0120 19:50:32.699856 6499 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0120 19:50:32.699867 6499 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0120 19:50:32.699879 6499 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0120 19:50:32.699912 6499 factory.go:656] Stopping watch factory\\\\nI0120 19:50:32.699936 6499 ovnkube.go:599] Stopped ovnkube\\\\nI0120 19:50:32.699961 6499 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0120 19:50:32.699968 6499 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0120 19:50:32.699973 6499 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0120 19:50:3\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:50:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rtkhq_openshift-ovn-kubernetes(b00db8b2-f5fb-476f-bfc1-95c125fdaaac)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-55f6g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rtkhq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.743515 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-g49xj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2bc5bb03-140b-42e9-a874-a6f4b6baeac0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c0721ed71e322d0b3a19af595ffd502b76517efbcc9a3afce7aa598bcd69936\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x7th5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:54Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-g49xj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.748336 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.748393 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.748412 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.748434 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.748453 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.758006 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b5e2c458-c544-45d1-ac7b-da99352dce17\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"730556 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI0120 19:49:50.730634 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0120 19:49:50.730688 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nI0120 19:49:50.730699 1 dynamic_serving_content.go:135] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\"\\\\nI0120 19:49:50.730664 1 tlsconfig.go:203] \\\\\\\"Loaded serving cert\\\\\\\" certName=\\\\\\\"serving-cert::/tmp/serving-cert-2685484887/tls.crt::/tmp/serving-cert-2685484887/tls.key\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"localhost\\\\\\\\\\\\\\\" [serving] validServingFor=[localhost] issuer=\\\\\\\\\\\\\\\"check-endpoints-signer@1768938574\\\\\\\\\\\\\\\" (2026-01-20 19:49:34 +0000 UTC to 2026-02-19 19:49:35 +0000 UTC (now=2026-01-20 19:49:50.730146345 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731099 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1768938585\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1768938585\\\\\\\\\\\\\\\" (2026-01-20 18:49:45 +0000 UTC to 2027-01-20 18:49:45 +0000 UTC (now=2026-01-20 19:49:50.73107969 +0000 UTC))\\\\\\\"\\\\nI0120 19:49:50.731135 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI0120 19:49:50.731156 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI0120 19:49:50.730647 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI0120 19:49:50.731166 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI0120 19:49:50.731391 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nF0120 19:49:50.732212 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.776746 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d26abef2-5a7f-49f2-8ff1-efa26022b52d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c1c255cea6c2914894cde228dcbbdadc1cd28f5cefd114c42077288a1dd5c45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f06102b93c93476cbe45f69fdf74e536951b647c073d4ae7b5afc4e97871d9ac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94a5d2ccfa4b3ba0ab9d42a444e062bd21f247612563af7e6a3adcabbe118eab\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.790400 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5dt6b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:08Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-h4c6s\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.809774 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:52Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b1bd68a45bf14a903cb58696ca95b3b886448ae4a3e74ce3232564b88c0bf2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1bd444d4a6cbf9dff4eeae6813b84a37ea870234ce8647f594a37be3d5fc676a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z"
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.833153 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 09:27:45.286869775 +0000 UTC
Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.844990 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"639acb79-b41e-4a42-baa4-6830dbcc9bf5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c361f0131b501403888a51c07e9bbb58055ffb18d3753882cc7b97bd152847e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bb6f646561fd5d7ddb9f079d11b60e999475813045b3e31cf2c9d388e3829e5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://867fd2461032504529b03b6dac05c3984250d3af1d7924752b570db13a8a67d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dee8a1c2042f698a59c0941b44c876686e2ac10afe5ff2e8302a8aa322fbf7f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://43359082d4f3859c8a005361bf0d86f5fc63e32526767ee5e367741ff61e335a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ee0c7e9801f6f390d68e1d4be94a4ecba654e5c2a3c055ff853605e0f06410d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cc5f8fa32614352af58e99e4a8ab773e591baf8aba982e29258c8b3745a837e7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:33Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7b092f8e3a16a6b3566ae853fd9955d9d197c66321fbd8ab81e627a7ab586973\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-20T19:49:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-20T19:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:49:32Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.850645 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.850671 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.850681 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.850693 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.850717 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.860563 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.872447 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7d2a8aa-40b0-44d5-a210-c72d73b43f94\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd891ccfc2f7c653d15c603124139e7322cd277c60215b0086d5313f6fab68ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://655a82a1245dced1b2494a6fe1f63742718d0bb6452649a358bc12e72330d61d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-20T19:50:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qk4xw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-20T19:50:06Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qmlxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z" Jan 20 
19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.888589 4948 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-20T19:49:50Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:52Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.953187 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.953222 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.953231 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.953246 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:52 crc kubenswrapper[4948]: I0120 19:50:52.953256 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:52Z","lastTransitionTime":"2026-01-20T19:50:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.055595 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.055634 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.055645 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.055662 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.055671 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.158747 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.158815 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.158835 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.158860 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.158878 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.261131 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.261227 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.261282 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.261317 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.261345 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.364465 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.364530 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.364550 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.364578 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.364599 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.468331 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.468400 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.468423 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.468450 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.468474 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.570336 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:53 crc kubenswrapper[4948]: E0120 19:50:53.571233 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.572464 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.572542 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.572569 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.572606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.572641 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.676189 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.676265 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.676290 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.676322 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.676345 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.779145 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.779252 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.779274 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.779302 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.779319 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.833259 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 16:05:39.336426541 +0000 UTC Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.881413 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.881472 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.881488 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.881505 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.881517 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.983647 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.983694 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.983722 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.983737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:53 crc kubenswrapper[4948]: I0120 19:50:53.983747 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:53Z","lastTransitionTime":"2026-01-20T19:50:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.085872 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.085909 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.085917 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.085930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.085939 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.190623 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.190726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.190745 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.190776 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.190793 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.293436 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.293469 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.293479 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.293492 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.293501 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.396569 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.396631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.396655 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.396686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.396759 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.499805 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.499850 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.499865 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.499885 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.499899 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.569116 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.569345 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.569345 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.569393 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.569477 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.569559 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.602764 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.602818 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.602836 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.602858 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.602877 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.706280 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.706361 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.706386 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.706416 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.706439 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.809692 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.809789 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.809810 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.809835 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.809894 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.833550 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 04:45:01.990274698 +0000 UTC Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.834110 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.834252 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834384 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:58.834308061 +0000 UTC m=+146.785033070 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834430 4948 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.834484 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834502 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:58.834479436 +0000 UTC m=+146.785204445 (durationBeforeRetry 1m4s). 
Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834502 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:58.834479436 +0000 UTC m=+146.785204445 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.834586 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834667 4948 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834677 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834732 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834753 4948 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834763 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:58.834741363 +0000 UTC m=+146.785466372 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.834754 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834811 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:58.834792634 +0000 UTC m=+146.785517653 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834917 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834945 4948 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.834966 4948 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:50:54 crc kubenswrapper[4948]: E0120 19:50:54.835076 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:58.835054272 +0000 UTC m=+146.785779281 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.913534 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.913611 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.913634 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.913667 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:54 crc kubenswrapper[4948]: I0120 19:50:54.913689 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:54Z","lastTransitionTime":"2026-01-20T19:50:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.016737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.016803 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.016822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.016849 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.016869 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.104233 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.104311 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.104325 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.104346 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.104360 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: E0120 19:50:55.124498 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.130618 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.130681 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.130699 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.131104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.131126 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: E0120 19:50:55.147076 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.151794 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.151822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.151835 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.151853 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.151865 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: E0120 19:50:55.175438 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.181131 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.181171 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.181189 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.181216 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.181235 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: E0120 19:50:55.204564 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.211453 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.211497 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.211529 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.211552 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.211564 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: E0120 19:50:55.230866 4948 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148064Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608864Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-20T19:50:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"10576c92-8673-4ce7-85dc-a55a94bc568f\\\",\\\"systemUUID\\\":\\\"2cd9ef33-fc39-43ce-8f00-407ecd974be0\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-20T19:50:55Z is after 2025-08-24T17:21:41Z" Jan 20 19:50:55 crc kubenswrapper[4948]: E0120 19:50:55.231052 4948 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.234513 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
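The failed patch above is the last of five attempts: the kubelet records "Error updating node status, will retry" once per attempt, each carrying the same status payload (elided as {...} after its first full appearance above) and failing on the same expired webhook certificate, then gives up with "update node status exceeds retry count" from kubelet_node_status.go:572. The Go sketch below reconstructs that control flow. The retry bound matches the upstream kubelet's nodeStatusUpdateRetry constant, but tryUpdateNodeStatus here is a stand-in that always fails the way the webhook call does in this log, not kubelet's actual implementation.

// retrysketch.go: illustrative reconstruction of the bounded retry loop seen above.
package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the upstream kubelet constant; five failed
// attempts produce the five "will retry" records before the final error.
const nodeStatusUpdateRetry = 5

// tryUpdateNodeStatus is a stand-in (assumption) for the real status patch;
// here it always fails with the webhook error from the log.
func tryUpdateNodeStatus(attempt int) error {
	return errors.New(`failed calling webhook "node.network-node-identity.openshift.io": ` +
		`tls: failed to verify certificate: x509: certificate has expired or is not yet valid`)
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(i); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		// Corresponds to the final E0120 ... kubelet_node_status.go:572 record.
		fmt.Println("Unable to update node status:", err)
	}
}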
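Every retry fails for the same root cause: the serving certificate behind https://127.0.0.1:9743, the node.network-node-identity.openshift.io webhook endpoint, expired on 2025-08-24T17:21:41Z, long before the node clock's 2026-01-20T19:50:55Z. A minimal diagnostic sketch, assuming the endpoint is reachable from where it runs: it dials the port with chain verification disabled (verification is exactly what fails here) and prints the certificate's validity window.

// certcheck.go: print the validity window of the webhook's serving certificate.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// InsecureSkipVerify is deliberate and for diagnostics only: we want to
	// read the certificate even though it is expired.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.UTC().Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.UTC().Format(time.RFC3339))
	if now := time.Now().UTC(); now.After(cert.NotAfter) {
		// Same shape as the kubelet error: "current time ... is after ...".
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
}

A gap like this between notAfter and the current time on a CRC node usually indicates the cluster's internal certificates were never rotated after a long shutdown; until they are, the webhook rejects every node status patch.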
event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.234600 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.234625 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.234658 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.234775 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.338838 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.338901 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.338919 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.338941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.338957 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.441341 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.441382 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.441395 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.441410 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.441422 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.543992 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.544104 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.544124 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.544151 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.544172 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.569425 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:50:55 crc kubenswrapper[4948]: E0120 19:50:55.569858 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.647368 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.647440 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.647461 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.647487 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.647504 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.750927 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.751009 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.751050 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.751085 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.751123 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.834282 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 20:20:58.721891109 +0000 UTC Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.854295 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.854606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.854828 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.854993 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.855145 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.958358 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.958419 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.958437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.958457 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:55 crc kubenswrapper[4948]: I0120 19:50:55.958471 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:55Z","lastTransitionTime":"2026-01-20T19:50:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.062414 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.062579 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.062607 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.062686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.062769 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.165819 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.165959 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.166027 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.166071 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.166092 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.268066 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.268115 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.268130 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.268150 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.268164 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.370794 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.370921 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.370941 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.370961 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.370978 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.473043 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.473084 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.473096 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.473112 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.473124 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.569309 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.569393 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.570153 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:56 crc kubenswrapper[4948]: E0120 19:50:56.570133 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:56 crc kubenswrapper[4948]: E0120 19:50:56.570355 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:56 crc kubenswrapper[4948]: E0120 19:50:56.570630 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.575873 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.575912 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.575923 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.575940 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.575954 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
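
The three network-diagnostics pods above fail for the same root cause the Ready condition keeps citing: the runtime reports NetworkReady=false because nothing has populated /etc/kubernetes/cni/net.d/ yet. A small Go sketch that lists that directory the way a CNI loader would, to see what the runtime sees; the accepted extensions are an assumption based on common CNI conventions, and the path is verbatim from the log:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        dir := "/etc/kubernetes/cni/net.d" // path taken from the kubelet error
        entries, err := os.ReadDir(dir)
        if err != nil {
            // The directory may simply not exist until the network operator runs.
            fmt.Println("read dir:", err)
            return
        }
        found := 0
        for _, e := range entries {
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json": // common CNI config suffixes (assumption)
                fmt.Println("candidate CNI config:", filepath.Join(dir, e.Name()))
                found++
            }
        }
        if found == 0 {
            fmt.Println("no CNI configuration file found; NetworkReady will stay false")
        }
    }

Until a config appears there, every pod that needs a new sandbox will keep cycling through the same skipped syncs.
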
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.677790 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.677822 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.677831 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.677843 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.677851 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.780299 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.780361 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.780378 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.780402 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.780419 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.834913 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 04:39:21.485511713 +0000 UTC
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.883537 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.883603 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.883611 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.883625 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.883633 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.985503 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.985557 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.985572 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.985589 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:56 crc kubenswrapper[4948]: I0120 19:50:56.985603 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:56Z","lastTransitionTime":"2026-01-20T19:50:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
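
The certificate_manager line is a separate, healthy subsystem: the kubelet-serving certificate is valid until 2026-02-24 and is only being scheduled for rotation. The deadline is redrawn with fresh random jitter on every evaluation, which is why nearby lines quote dates as far apart as 2025-11-08 and 2026-01-17. A sketch of that behavior, assuming the deadline is drawn uniformly between 70% and 90% of the certificate lifetime (the fractions client-go's certificate manager has historically used) and assuming a one-year lifetime, since notBefore does not appear in the log:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // rotationDeadline picks a random point between 70% and 90% of the
    // certificate's validity window, mimicking the jitter visible in the log.
    func rotationDeadline(notBefore, notAfter time.Time) time.Time {
        total := notAfter.Sub(notBefore)
        jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
        return notBefore.Add(jittered)
    }

    func main() {
        notAfter, _ := time.Parse("2006-01-02 15:04:05", "2026-02-24 05:53:03") // from the log
        notBefore := notAfter.AddDate(-1, 0, 0)                                 // assumed one-year lifetime
        for i := 0; i < 3; i++ {
            fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter).Format(time.RFC3339))
        }
    }

Under those assumptions the sampled deadlines land between early November 2025 and mid-January 2026, matching the spread of the deadlines logged around here.
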
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.087332 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.087375 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.087385 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.087401 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.087412 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.190930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.190982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.191000 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.191028 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.191050 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.293944 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.294013 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.294027 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.294048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.294064 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.397131 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.397235 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.397256 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.397302 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.397326 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.500506 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.500585 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.500600 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.500619 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.500633 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.569980 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:57 crc kubenswrapper[4948]: E0120 19:50:57.570153 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.602401 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.602434 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.602444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.602459 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.602471 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.705116 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.705181 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.705201 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.705230 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.705248 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.808099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.808158 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.808168 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.808193 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.808206 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.835474 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 19:36:24.919117879 +0000 UTC
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.912190 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.912277 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.912303 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.912340 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:57 crc kubenswrapper[4948]: I0120 19:50:57.912364 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:57Z","lastTransitionTime":"2026-01-20T19:50:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.015737 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.015794 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.015811 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.015839 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.015857 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.119190 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.119274 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.119292 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.119311 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.119360 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.221630 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.221702 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.221760 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.221785 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.221799 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.324565 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.324643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.324666 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.324698 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.324753 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.426883 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.426921 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.426939 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.426972 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.426995 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.529840 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.529961 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.529975 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.529995 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.530005 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.569469 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 20 19:50:58 crc kubenswrapper[4948]: E0120 19:50:58.569584 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.569806 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 20 19:50:58 crc kubenswrapper[4948]: E0120 19:50:58.569853 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.569978 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 20 19:50:58 crc kubenswrapper[4948]: E0120 19:50:58.570062 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.632945 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.632993 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.633004 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.633021 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.633038 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
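
At this density (five status cycles per second, plus the per-pod retries) the log is easier to triage by counting than by reading. Each entry follows the klog layout visible throughout: a severity letter plus MMDD, a timestamp, the PID, file:line, a quoted message, then key=value pairs. A best-effort Go sketch that tallies the quoted messages from stdin; the regular expression targets this file's shape, not the full klog grammar:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    func main() {
        // Matches entries like:
        //   I0120 19:50:58.633038 4948 setters.go:603] "Node became not ready" ...
        // Capture groups: severity, file:line, quoted message.
        re := regexp.MustCompile(`([IWE])\d{4} \d{2}:\d{2}:\d{2}\.\d+\s+\d+\s+(\S+\.go:\d+)\] "([^"]+)"`)
        counts := map[string]int{}
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // some entries are very long
        for sc.Scan() {
            // A physical line may hold several entries, so scan for all matches.
            for _, m := range re.FindAllStringSubmatch(sc.Text(), -1) {
                counts[m[1]+" "+m[2]+" "+m[3]]++
            }
        }
        for k, n := range counts {
            fmt.Printf("%6d %s\n", n, k)
        }
    }

Run over this file it would show the "Recording event message for node" and "Node became not ready" pairs dominating, with the webhook and sandbox errors as the rare distinct entries.
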
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.735559 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.735606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.735616 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.735632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.735643 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.836493 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 01:55:21.550974914 +0000 UTC
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.838376 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.838450 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.838464 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.838482 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.838493 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.940573 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.940633 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.940655 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.940682 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:58 crc kubenswrapper[4948]: I0120 19:50:58.940699 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:58Z","lastTransitionTime":"2026-01-20T19:50:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.043198 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.043267 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.043286 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.043309 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.043327 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.146055 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.146116 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.146132 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.146155 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.146171 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.248552 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.248631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.248655 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.248686 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.248752 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.351225 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.351266 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.351288 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.351305 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.351320 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.453773 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.453840 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.453857 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.453879 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.453894 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.557382 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.557417 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.557446 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.557460 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.557471 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.569966 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:50:59 crc kubenswrapper[4948]: E0120 19:50:59.570136 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.660563 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.660632 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.660643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.660659 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.660669 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.763436 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.763486 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.763501 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.763522 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.763540 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.837182 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 23:22:29.909900853 +0000 UTC Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.865957 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.866015 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.866025 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.866038 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.866048 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.969111 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.969205 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.969234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.969258 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:50:59 crc kubenswrapper[4948]: I0120 19:50:59.969278 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:50:59Z","lastTransitionTime":"2026-01-20T19:50:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.071351 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.071393 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.071404 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.071440 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.071451 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.175082 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.175155 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.175172 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.175188 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.175200 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.277876 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.277910 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.277918 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.277930 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.277940 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.381397 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.381445 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.381458 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.381475 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.381488 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.484008 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.484076 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.484099 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.484129 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.484149 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.569506 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.569547 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:51:00 crc kubenswrapper[4948]: E0120 19:51:00.569632 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.569511 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.570198 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:51:00 crc kubenswrapper[4948]: E0120 19:51:00.570467 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:51:00 crc kubenswrapper[4948]: E0120 19:51:00.570583 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.586932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.586982 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.587000 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.587027 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.587048 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.690262 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.690321 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.690336 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.690357 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.690373 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.793617 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.794201 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.794214 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.794234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.794246 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.837793 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 20:19:31.576450087 +0000 UTC Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.896974 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.897033 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.897048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.897069 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:00 crc kubenswrapper[4948]: I0120 19:51:00.897084 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:00Z","lastTransitionTime":"2026-01-20T19:51:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.000011 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.000048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.000057 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.000070 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.000080 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:01Z","lastTransitionTime":"2026-01-20T19:51:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.102336 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.102377 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.102385 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.102398 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.102408 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:01Z","lastTransitionTime":"2026-01-20T19:51:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.204310 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.204340 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.204352 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.204365 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.204375 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:01Z","lastTransitionTime":"2026-01-20T19:51:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.728400 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:51:01 crc kubenswrapper[4948]: E0120 19:51:01.728577 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.731875 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.731938 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.731959 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.731987 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.732011 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:01Z","lastTransitionTime":"2026-01-20T19:51:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.736698 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/2.log" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.742770 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerStarted","Data":"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331"} Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.744682 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.827746 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=67.827683205 podStartE2EDuration="1m7.827683205s" podCreationTimestamp="2026-01-20 19:49:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:01.820102077 +0000 UTC m=+89.770827076" watchObservedRunningTime="2026-01-20 19:51:01.827683205 +0000 UTC m=+89.778408174" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.834249 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.834291 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.834305 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.834324 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.834340 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:01Z","lastTransitionTime":"2026-01-20T19:51:01Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.838399 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 02:31:14.484815631 +0000 UTC Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.936977 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.937019 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.937031 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.937048 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.937059 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:01Z","lastTransitionTime":"2026-01-20T19:51:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:01 crc kubenswrapper[4948]: I0120 19:51:01.979357 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qmlxv" podStartSLOduration=69.979338862 podStartE2EDuration="1m9.979338862s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:01.948519347 +0000 UTC m=+89.899244316" watchObservedRunningTime="2026-01-20 19:51:01.979338862 +0000 UTC m=+89.930063831" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.039580 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.039622 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.039631 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.039643 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.039652 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.046123 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-ms8h8" podStartSLOduration=71.046108923 podStartE2EDuration="1m11.046108923s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.045522577 +0000 UTC m=+89.996247546" watchObservedRunningTime="2026-01-20 19:51:02.046108923 +0000 UTC m=+89.996833892" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.046268 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podStartSLOduration=71.046263577 podStartE2EDuration="1m11.046263577s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.024282374 +0000 UTC m=+89.975007343" watchObservedRunningTime="2026-01-20 19:51:02.046263577 +0000 UTC m=+89.996988546" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.128378 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-g49xj" podStartSLOduration=71.128361408 podStartE2EDuration="1m11.128361408s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.060986411 +0000 UTC m=+90.011711380" watchObservedRunningTime="2026-01-20 19:51:02.128361408 +0000 UTC m=+90.079086377" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.141998 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.142024 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.142034 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.142046 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.142055 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.147256 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=71.147242075 podStartE2EDuration="1m11.147242075s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.128649606 +0000 UTC m=+90.079374575" watchObservedRunningTime="2026-01-20 19:51:02.147242075 +0000 UTC m=+90.097967044" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.167408 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=72.167393298 podStartE2EDuration="1m12.167393298s" podCreationTimestamp="2026-01-20 19:49:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.148120349 +0000 UTC m=+90.098845318" watchObservedRunningTime="2026-01-20 19:51:02.167393298 +0000 UTC m=+90.118118257" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.167872 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=44.167868931 podStartE2EDuration="44.167868931s" podCreationTimestamp="2026-01-20 19:50:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.167000897 +0000 UTC m=+90.117725866" watchObservedRunningTime="2026-01-20 19:51:02.167868931 +0000 UTC m=+90.118593900" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.200860 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-tx5bt" podStartSLOduration=71.200842655 podStartE2EDuration="1m11.200842655s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.200745832 +0000 UTC m=+90.151470801" watchObservedRunningTime="2026-01-20 19:51:02.200842655 +0000 UTC m=+90.151567624" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.230977 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-qttfm" podStartSLOduration=71.230962581 podStartE2EDuration="1m11.230962581s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.230141148 +0000 UTC m=+90.180866117" watchObservedRunningTime="2026-01-20 19:51:02.230962581 +0000 UTC m=+90.181687550" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.244437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.244477 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.244486 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.244501 4948 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.244511 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.265499 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podStartSLOduration=71.265477907 podStartE2EDuration="1m11.265477907s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:02.25501674 +0000 UTC m=+90.205741709" watchObservedRunningTime="2026-01-20 19:51:02.265477907 +0000 UTC m=+90.216202876" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.386528 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.386575 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.386587 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.386606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.386617 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.489463 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.489507 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.489518 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.489533 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.489544 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.569870 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:51:02 crc kubenswrapper[4948]: E0120 19:51:02.569985 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.570180 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:51:02 crc kubenswrapper[4948]: E0120 19:51:02.570252 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.570282 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:51:02 crc kubenswrapper[4948]: E0120 19:51:02.570457 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.592234 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.592289 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.592313 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.592340 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.592362 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.695662 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.695757 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.695773 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.695791 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.695807 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.750835 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-h4c6s"] Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.750977 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:51:02 crc kubenswrapper[4948]: E0120 19:51:02.751081 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.806488 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.806542 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.806555 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.806576 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.806591 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.838854 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 04:37:20.489038919 +0000 UTC Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.909381 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.909408 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.909417 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.909429 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:02 crc kubenswrapper[4948]: I0120 19:51:02.909437 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:02Z","lastTransitionTime":"2026-01-20T19:51:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.012343 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.012427 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.012442 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.012461 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.012473 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.114179 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.114473 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.114482 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.114494 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.114503 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.216408 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.216437 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.216444 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.216456 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.216465 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.319270 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.319347 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.319359 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.319377 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.319391 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.422806 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.422869 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.422888 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.422913 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.422936 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.526606 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.526745 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.526779 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.526809 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.526833 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.630209 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.630271 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.630289 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.630313 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.630331 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.733040 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.733106 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.733119 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.733135 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.733147 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.835062 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.835106 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.835118 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.835135 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.835147 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.839610 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 17:32:17.757255847 +0000 UTC Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.938545 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.938597 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.938615 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.938639 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:03 crc kubenswrapper[4948]: I0120 19:51:03.938656 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:03Z","lastTransitionTime":"2026-01-20T19:51:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.041291 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.041336 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.041347 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.041364 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.041395 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:04Z","lastTransitionTime":"2026-01-20T19:51:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.144397 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.144461 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.144478 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.144500 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.144517 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:04Z","lastTransitionTime":"2026-01-20T19:51:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.246637 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.246689 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.246726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.246748 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.246764 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:04Z","lastTransitionTime":"2026-01-20T19:51:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.349262 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.349298 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.349308 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.349326 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.349344 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:04Z","lastTransitionTime":"2026-01-20T19:51:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.452616 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.452662 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.452681 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.452726 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.452741 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:04Z","lastTransitionTime":"2026-01-20T19:51:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.569500 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.569651 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.569949 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:51:04 crc kubenswrapper[4948]: E0120 19:51:04.570095 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.570166 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:51:04 crc kubenswrapper[4948]: E0120 19:51:04.570258 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-h4c6s" podUID="dbfcfce6-0ab8-40ba-80b2-d391a7dd5418" Jan 20 19:51:04 crc kubenswrapper[4948]: E0120 19:51:04.570525 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 20 19:51:04 crc kubenswrapper[4948]: E0120 19:51:04.570783 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.839817 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 06:13:09.829634944 +0000 UTC Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.864456 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.864497 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.864508 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.864526 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.864540 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:04Z","lastTransitionTime":"2026-01-20T19:51:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.865954 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.971001 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.971058 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.971073 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.971092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:04 crc kubenswrapper[4948]: I0120 19:51:04.971108 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:04Z","lastTransitionTime":"2026-01-20T19:51:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.074015 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.074092 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.074116 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.074147 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.074168 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:05Z","lastTransitionTime":"2026-01-20T19:51:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.176863 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.176916 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.176932 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.176952 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.176966 4948 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-20T19:51:05Z","lastTransitionTime":"2026-01-20T19:51:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.279575 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.279636 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.279654 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.279678 4948 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.279910 4948 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.330466 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k2czh"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.331346 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.331426 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.331874 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.332471 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.334172 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b9nsx"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.334571 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.335184 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.335595 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.339762 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.339983 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.341443 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.342000 4948 util.go:30] "No sandbox for pod can be found. 
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.342074 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Jan 20 19:51:05 crc kubenswrapper[4948]: W0120 19:51:05.346604 4948 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv": failed to list *v1.Secret: secrets "openshift-apiserver-operator-dockercfg-xtcjv" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object
Jan 20 19:51:05 crc kubenswrapper[4948]: E0120 19:51:05.346649 4948 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-xtcjv\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-operator-dockercfg-xtcjv\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.346931 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.348671 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.349197 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.349552 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 20 19:51:05 crc kubenswrapper[4948]: W0120 19:51:05.349722 4948 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object
Jan 20 19:51:05 crc kubenswrapper[4948]: E0120 19:51:05.349750 4948 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.349859 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 20 19:51:05 crc kubenswrapper[4948]: W0120 19:51:05.351331 4948 reflector.go:561] object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c": failed to list *v1.Secret: secrets "openshift-controller-manager-sa-dockercfg-msq4c" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager": no relationship found between node 'crc' and this object
Jan 20 19:51:05 crc kubenswrapper[4948]: E0120 19:51:05.351397 4948 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-msq4c\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-controller-manager-sa-dockercfg-msq4c\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.351624 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 20 19:51:05 crc kubenswrapper[4948]: W0120 19:51:05.351906 4948 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config": failed to list *v1.ConfigMap: configmaps "openshift-apiserver-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object
Jan 20 19:51:05 crc kubenswrapper[4948]: E0120 19:51:05.351932 4948 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-apiserver-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Jan 20 19:51:05 crc kubenswrapper[4948]: W0120 19:51:05.351995 4948 reflector.go:561] object-"openshift-apiserver-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object
Jan 20 19:51:05 crc kubenswrapper[4948]: E0120 19:51:05.352012 4948 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Jan 20 19:51:05 crc kubenswrapper[4948]: W0120 19:51:05.352095 4948 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert": failed to list *v1.Secret: secrets "openshift-apiserver-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object
Jan 20 19:51:05 crc kubenswrapper[4948]: E0120 19:51:05.352117 4948 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.352251 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.352746 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.352939 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.354261 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.354510 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.354547 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.354811 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.355113 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.355299 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.355434 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.355472 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.357670 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.358011 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.358129 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.358314 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.358788 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.358869 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.358926 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.359171 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.359388 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.359567 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.363033 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-9kr4w"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.363550 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vxm8l"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.363691 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-9kr4w"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.363837 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-k4c6c"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.364201 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.364584 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365155 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-etcd-serving-ca\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365240 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-serving-cert\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365313 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365332 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365375 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365370 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22c78\" (UniqueName: \"kubernetes.io/projected/337527e2-a869-4df8-988d-66bf559e348d-kube-api-access-22c78\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365473 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/99ae8982-f499-4219-9a53-8d76189324d5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365508 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrvxd\" (UniqueName: \"kubernetes.io/projected/11a0fa78-3646-42ca-a01a-8d93d78d669e-kube-api-access-wrvxd\") pod \"cluster-samples-operator-665b6dd947-xgspc\" (UID: \"11a0fa78-3646-42ca-a01a-8d93d78d669e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365533 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-config\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365577 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-encryption-config\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365614 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/99ae8982-f499-4219-9a53-8d76189324d5-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365659 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/337527e2-a869-4df8-988d-66bf559e348d-audit-dir\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365684 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/99ae8982-f499-4219-9a53-8d76189324d5-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365732 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-etcd-client\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365754 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-client-ca\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365806 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21157116-8790-4342-ba0d-e356baad7ae1-serving-cert\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365828 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsfg6\" (UniqueName: \"kubernetes.io/projected/21157116-8790-4342-ba0d-e356baad7ae1-kube-api-access-rsfg6\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365853 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365901 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-config\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365924 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-audit\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.365987 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/99ae8982-f499-4219-9a53-8d76189324d5-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-client-ca\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366107 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/487f8971-88dc-4ebe-9d67-3b48284c72f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366216 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-image-import-ca\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366308 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmhsr\" (UniqueName: \"kubernetes.io/projected/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-kube-api-access-bmhsr\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366355 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/11a0fa78-3646-42ca-a01a-8d93d78d669e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-xgspc\" (UID: \"11a0fa78-3646-42ca-a01a-8d93d78d669e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366392 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcp74\" (UniqueName: \"kubernetes.io/projected/487f8971-88dc-4ebe-9d67-3b48284c72f9-kube-api-access-zcp74\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366425 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/337527e2-a869-4df8-988d-66bf559e348d-node-pullsecrets\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366455 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/487f8971-88dc-4ebe-9d67-3b48284c72f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366519 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99ae8982-f499-4219-9a53-8d76189324d5-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366549 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-config\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.366572 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-serving-cert\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.370325 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.370662 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.375006 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.375213 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-lxvjj"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.375923 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.376030 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.376455 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.376580 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-lxvjj"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.378627 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.379308 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.386894 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.393624 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.395290 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.396397 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.397142 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.397863 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.398309 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.398635 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.400270 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-2gfvd"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.401603 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-2gfvd"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.403808 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.404979 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.405137 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.405389 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.406785 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.407032 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.407085 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.407135 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.409675 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.409689 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.410836 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.412878 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.435753 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.439454 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.440297 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.440384 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.440528 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.440588 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.440610 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.440684 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.440757 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.441340 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.442441 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.443035 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-d86b9"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.443818 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-d86b9"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.449219 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.449825 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.451230 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.452055 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.452529 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.453451 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.453524 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.455822 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.456015 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.456271 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.456407 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.456508 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.456739 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.457673 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.458394 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.458528 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.460110 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bwm86"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.460423 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.460560 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.462867 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.463963 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.465441 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.465643 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.465934 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.466056 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.468831 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/99ae8982-f499-4219-9a53-8d76189324d5-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.468899 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx4pw\" (UniqueName: \"kubernetes.io/projected/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-kube-api-access-hx4pw\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.468931 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe6d297c-7bfa-4431-9b33-374d4ae3b503-config\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.468959 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-etcd-client\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.468983 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/337527e2-a869-4df8-988d-66bf559e348d-audit-dir\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.469093 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/99ae8982-f499-4219-9a53-8d76189324d5-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.470780 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/99ae8982-f499-4219-9a53-8d76189324d5-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.470828 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-client-ca\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.470857 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.470899 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21157116-8790-4342-ba0d-e356baad7ae1-serving-cert\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.470925 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsfg6\" (UniqueName: \"kubernetes.io/projected/21157116-8790-4342-ba0d-e356baad7ae1-kube-api-access-rsfg6\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.470958 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdwm4\" (UniqueName: \"kubernetes.io/projected/4a88cd6c-06ab-471e-b7c1-e87b957e4392-kube-api-access-mdwm4\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.470985 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471009 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-policies\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471034 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7g2c\" (UniqueName: \"kubernetes.io/projected/fe57b94e-b773-4dc8-9a99-a2217ab4040c-kube-api-access-z7g2c\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471058 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471086 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-config\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471114 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4fjf\" (UniqueName: \"kubernetes.io/projected/ac50a1ff-ffd6-4c97-b685-04d5e9740183-kube-api-access-b4fjf\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471141 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471170 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cbwk\" (UniqueName: \"kubernetes.io/projected/dc247eab-6778-41d7-a69d-c551c989814e-kube-api-access-9cbwk\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471192 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-encryption-config\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471216 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-audit\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471241 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471287 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-client-ca\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471318 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/487f8971-88dc-4ebe-9d67-3b48284c72f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471346 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/99ae8982-f499-4219-9a53-8d76189324d5-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471373 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-serving-cert\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471402 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471426 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84wlp\" (UniqueName: \"kubernetes.io/projected/fe6d297c-7bfa-4431-9b33-374d4ae3b503-kube-api-access-84wlp\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471452 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-image-import-ca\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.471774 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.481954 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-trusted-ca-bundle\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.482056 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-oauth-serving-cert\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.482107 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fe6d297c-7bfa-4431-9b33-374d4ae3b503-trusted-ca\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.482146 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmhsr\" (UniqueName: \"kubernetes.io/projected/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-kube-api-access-bmhsr\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.482178 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpbml\" (UniqueName: \"kubernetes.io/projected/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-kube-api-access-cpbml\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.487938 4948 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.490875 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a827077f-10f7-4609-93bc-14cd2b7889b4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.490952 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdhj8\" (UniqueName: \"kubernetes.io/projected/0d15401f-919f-4d4e-b466-91d2d0125952-kube-api-access-xdhj8\") pod \"dns-operator-744455d44c-d86b9\" (UID: \"0d15401f-919f-4d4e-b466-91d2d0125952\") " pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491001 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/11a0fa78-3646-42ca-a01a-8d93d78d669e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-xgspc\" (UID: \"11a0fa78-3646-42ca-a01a-8d93d78d669e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491027 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcp74\" (UniqueName: \"kubernetes.io/projected/487f8971-88dc-4ebe-9d67-3b48284c72f9-kube-api-access-zcp74\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491056 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-dir\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491083 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a88cd6c-06ab-471e-b7c1-e87b957e4392-config\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491104 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d15401f-919f-4d4e-b466-91d2d0125952-metrics-tls\") pod \"dns-operator-744455d44c-d86b9\" (UID: \"0d15401f-919f-4d4e-b466-91d2d0125952\") " pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491134 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/337527e2-a869-4df8-988d-66bf559e348d-node-pullsecrets\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " 
pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491158 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-config\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491185 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-serving-cert\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491223 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491253 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491287 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/487f8971-88dc-4ebe-9d67-3b48284c72f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491317 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99ae8982-f499-4219-9a53-8d76189324d5-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491350 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491375 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-service-ca-bundle\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491407 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-config\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491437 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491470 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491497 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-config\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491522 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-oauth-config\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491548 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ac50a1ff-ffd6-4c97-b685-04d5e9740183-audit-dir\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491584 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-serving-cert\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491609 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc247eab-6778-41d7-a69d-c551c989814e-serving-cert\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" 
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491634 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a827077f-10f7-4609-93bc-14cd2b7889b4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491663 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-audit-policies\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491918 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-etcd-serving-ca\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491946 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-serving-cert\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491968 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.491994 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492019 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a827077f-10f7-4609-93bc-14cd2b7889b4-config\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492042 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-etcd-client\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492076 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-22c78\" (UniqueName: \"kubernetes.io/projected/337527e2-a869-4df8-988d-66bf559e348d-kube-api-access-22c78\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492108 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe6d297c-7bfa-4431-9b33-374d4ae3b503-serving-cert\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492134 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492156 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492180 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492241 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/99ae8982-f499-4219-9a53-8d76189324d5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492267 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4a88cd6c-06ab-471e-b7c1-e87b957e4392-machine-approver-tls\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492300 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrvxd\" (UniqueName: \"kubernetes.io/projected/11a0fa78-3646-42ca-a01a-8d93d78d669e-kube-api-access-wrvxd\") pod \"cluster-samples-operator-665b6dd947-xgspc\" (UID: \"11a0fa78-3646-42ca-a01a-8d93d78d669e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492324 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4a88cd6c-06ab-471e-b7c1-e87b957e4392-auth-proxy-config\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492348 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/aa3527bc-8d08-4c9a-9349-85d27473d624-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492371 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqdmm\" (UniqueName: \"kubernetes.io/projected/aa3527bc-8d08-4c9a-9349-85d27473d624-kube-api-access-cqdmm\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492398 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-config\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492420 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-encryption-config\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492442 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492465 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492485 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwn92\" (UniqueName: \"kubernetes.io/projected/516ee408-b349-44cd-9ba3-1a486e631818-kube-api-access-gwn92\") pod \"downloads-7954f5f757-9kr4w\" (UID: \"516ee408-b349-44cd-9ba3-1a486e631818\") " pod="openshift-console/downloads-7954f5f757-9kr4w" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492514 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492536 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-service-ca\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492558 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa3527bc-8d08-4c9a-9349-85d27473d624-serving-cert\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.492867 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/337527e2-a869-4df8-988d-66bf559e348d-audit-dir\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.494068 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-client-ca\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.494577 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.496853 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-etcd-client\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.496904 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.497037 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-hxwlm"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.508148 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.508163 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.639097 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.641819 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21157116-8790-4342-ba0d-e356baad7ae1-serving-cert\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.642480 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.644744 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-config\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.645515 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-audit\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.646442 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-client-ca\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.647207 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.647487 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/99ae8982-f499-4219-9a53-8d76189324d5-service-ca\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.647591 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.648643 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-image-import-ca\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.648897 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/337527e2-a869-4df8-988d-66bf559e348d-node-pullsecrets\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: 
I0120 19:51:05.649076 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.649487 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.650433 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.650983 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.651515 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.651900 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.656857 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657122 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657174 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657349 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-etcd-client\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657553 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx4pw\" (UniqueName: \"kubernetes.io/projected/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-kube-api-access-hx4pw\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657585 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe6d297c-7bfa-4431-9b33-374d4ae3b503-config\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657623 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" 
(UniqueName: \"kubernetes.io/configmap/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657665 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdwm4\" (UniqueName: \"kubernetes.io/projected/4a88cd6c-06ab-471e-b7c1-e87b957e4392-kube-api-access-mdwm4\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657686 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657732 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-policies\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657750 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7g2c\" (UniqueName: \"kubernetes.io/projected/fe57b94e-b773-4dc8-9a99-a2217ab4040c-kube-api-access-z7g2c\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657771 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4fjf\" (UniqueName: \"kubernetes.io/projected/ac50a1ff-ffd6-4c97-b685-04d5e9740183-kube-api-access-b4fjf\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657788 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657794 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-encryption-config\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657889 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657945 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cbwk\" (UniqueName: 
\"kubernetes.io/projected/dc247eab-6778-41d7-a69d-c551c989814e-kube-api-access-9cbwk\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.657968 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658012 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-serving-cert\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658040 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658062 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84wlp\" (UniqueName: \"kubernetes.io/projected/fe6d297c-7bfa-4431-9b33-374d4ae3b503-kube-api-access-84wlp\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658084 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-oauth-serving-cert\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658107 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-trusted-ca-bundle\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658130 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fe6d297c-7bfa-4431-9b33-374d4ae3b503-trusted-ca\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658163 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpbml\" (UniqueName: \"kubernetes.io/projected/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-kube-api-access-cpbml\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658183 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a827077f-10f7-4609-93bc-14cd2b7889b4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658204 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdhj8\" (UniqueName: \"kubernetes.io/projected/0d15401f-919f-4d4e-b466-91d2d0125952-kube-api-access-xdhj8\") pod \"dns-operator-744455d44c-d86b9\" (UID: \"0d15401f-919f-4d4e-b466-91d2d0125952\") " pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658237 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-dir\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658257 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a88cd6c-06ab-471e-b7c1-e87b957e4392-config\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658279 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d15401f-919f-4d4e-b466-91d2d0125952-metrics-tls\") pod \"dns-operator-744455d44c-d86b9\" (UID: \"0d15401f-919f-4d4e-b466-91d2d0125952\") " pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658303 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-config\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.658323 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-serving-cert\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.662209 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-config\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.663066 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-encryption-config\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.663319 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/99ae8982-f499-4219-9a53-8d76189324d5-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.663842 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-trusted-ca-bundle\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.663913 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-config\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.666749 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-etcd-serving-ca\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.674430 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/337527e2-a869-4df8-988d-66bf559e348d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.674663 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.706139 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-serving-cert\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.707250 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.708098 4948 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.709101 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.709278 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.709398 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.709778 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-policies\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.709825 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jcvk4"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.710170 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/99ae8982-f499-4219-9a53-8d76189324d5-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.710326 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.710594 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.711126 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fe6d297c-7bfa-4431-9b33-374d4ae3b503-trusted-ca\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.713551 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-dir\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.715538 4948 
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.715538 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.716929 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.717253 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.717419 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.717603 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.717798 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.717928 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-oauth-serving-cert\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.718314 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-etcd-client\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.718772 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a827077f-10f7-4609-93bc-14cd2b7889b4-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.718867 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe6d297c-7bfa-4431-9b33-374d4ae3b503-config\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.718970 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.719286 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.719591 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.720150 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.720483 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-serving-cert\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.720815 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ac50a1ff-ffd6-4c97-b685-04d5e9740183-serving-cert\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.721190 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-serving-cert\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.721370 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/337527e2-a869-4df8-988d-66bf559e348d-encryption-config\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.721820 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.721829 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/11a0fa78-3646-42ca-a01a-8d93d78d669e-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-xgspc\" (UID: \"11a0fa78-3646-42ca-a01a-8d93d78d669e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.722415 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.723409 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.724164 4948 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.724572 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4a88cd6c-06ab-471e-b7c1-e87b957e4392-config\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.725040 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.725291 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0d15401f-919f-4d4e-b466-91d2d0125952-metrics-tls\") pod \"dns-operator-744455d44c-d86b9\" (UID: \"0d15401f-919f-4d4e-b466-91d2d0125952\") " pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.725370 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-config\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.725679 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k4fgt"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.725749 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.726126 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.726265 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.726553 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.728220 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.728732 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.730861 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.731271 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.734246 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.736648 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.737121 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.738370 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.739113 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b9nsx"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.740677 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-md5gg"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.741396 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.741797 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.742386 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.742660 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.743438 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.744081 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.745104 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.746400 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/99ae8982-f499-4219-9a53-8d76189324d5-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-mm2q7\" (UID: \"99ae8982-f499-4219-9a53-8d76189324d5\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.747592 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbslp"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.748559 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.751605 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-94v8r"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.752576 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.753538 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.754192 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.754241 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759265 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759305 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-service-ca-bundle\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759335 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759355 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-image-registry-operator-tls\") pod 
\"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759376 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-config\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759393 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-oauth-config\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759412 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ac50a1ff-ffd6-4c97-b685-04d5e9740183-audit-dir\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759437 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc247eab-6778-41d7-a69d-c551c989814e-serving-cert\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759456 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a827077f-10f7-4609-93bc-14cd2b7889b4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759472 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-audit-policies\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759495 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759512 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a827077f-10f7-4609-93bc-14cd2b7889b4-config\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:05 crc 
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759539 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe6d297c-7bfa-4431-9b33-374d4ae3b503-serving-cert\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759559 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759580 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759600 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759632 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4a88cd6c-06ab-471e-b7c1-e87b957e4392-machine-approver-tls\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759653 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4a88cd6c-06ab-471e-b7c1-e87b957e4392-auth-proxy-config\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759676 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/aa3527bc-8d08-4c9a-9349-85d27473d624-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759694 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqdmm\" (UniqueName: \"kubernetes.io/projected/aa3527bc-8d08-4c9a-9349-85d27473d624-kube-api-access-cqdmm\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg"
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759753 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759773 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwn92\" (UniqueName: \"kubernetes.io/projected/516ee408-b349-44cd-9ba3-1a486e631818-kube-api-access-gwn92\") pod \"downloads-7954f5f757-9kr4w\" (UID: \"516ee408-b349-44cd-9ba3-1a486e631818\") " pod="openshift-console/downloads-7954f5f757-9kr4w" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759791 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759808 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-service-ca\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.759828 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa3527bc-8d08-4c9a-9349-85d27473d624-serving-cert\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.760266 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-config\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.760640 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.761107 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-service-ca-bundle\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.761675 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4a88cd6c-06ab-471e-b7c1-e87b957e4392-auth-proxy-config\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.761834 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.762047 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc247eab-6778-41d7-a69d-c551c989814e-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.762221 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-service-ca\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.762916 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.763649 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/ac50a1ff-ffd6-4c97-b685-04d5e9740183-audit-policies\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.763725 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/ac50a1ff-ffd6-4c97-b685-04d5e9740183-audit-dir\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.763730 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/aa3527bc-8d08-4c9a-9349-85d27473d624-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg"
\"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.764466 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.765524 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-mqlgr"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.765793 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a827077f-10f7-4609-93bc-14cd2b7889b4-config\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.766054 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.766728 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.766916 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.767455 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe6d297c-7bfa-4431-9b33-374d4ae3b503-serving-cert\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.766783 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc247eab-6778-41d7-a69d-c551c989814e-serving-cert\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.768197 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.768745 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa3527bc-8d08-4c9a-9349-85d27473d624-serving-cert\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.768937 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.770173 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.771904 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.773746 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-62qsd"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.774441 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.774459 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.774540 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.775317 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.784198 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-9kr4w"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.785292 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-2gfvd"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.786789 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.787863 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4a88cd6c-06ab-471e-b7c1-e87b957e4392-machine-approver-tls\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.787872 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.788344 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-oauth-config\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.789953 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.790317 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.790821 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k2czh"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.793528 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.794176 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-d86b9"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.794497 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.795425 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vxm8l"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.796573 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.797915 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-lxvjj"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.800138 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-8sf9d"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.800794 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-8sf9d" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.801963 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.802655 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.805321 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.806867 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.808039 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.808876 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.811092 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-md5gg"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.811137 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-hxwlm"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.812050 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.812886 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.813930 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jcvk4"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.815240 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-5svhh"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.816477 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.816607 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-5svhh"
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.817037 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.817854 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.818838 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-k4c6c"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.819831 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k4fgt"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.820966 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bwm86"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.821996 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkc9x"]
Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.823208 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-5svhh"]
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.824112 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-8sf9d"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.829028 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.830162 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkc9x"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.831415 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.832827 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbslp"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.833778 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-94v8r"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.834239 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.835000 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.835936 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f"] Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.840163 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 23:10:42.785128855 +0000 UTC Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.840550 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.854244 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.868756 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" event={"ID":"99ae8982-f499-4219-9a53-8d76189324d5","Type":"ContainerStarted","Data":"b3d9c0eee809e46f61aa0e909703349af97dce6d7c5b859abf9c8ecc7fc72723"} Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.889287 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsfg6\" (UniqueName: \"kubernetes.io/projected/21157116-8790-4342-ba0d-e356baad7ae1-kube-api-access-rsfg6\") pod \"route-controller-manager-6576b87f9c-ltp2j\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.929389 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmhsr\" (UniqueName: \"kubernetes.io/projected/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-kube-api-access-bmhsr\") pod \"controller-manager-879f6c89f-b9nsx\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.948194 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22c78\" (UniqueName: \"kubernetes.io/projected/337527e2-a869-4df8-988d-66bf559e348d-kube-api-access-22c78\") pod \"apiserver-76f77b778f-k2czh\" (UID: \"337527e2-a869-4df8-988d-66bf559e348d\") " pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.965408 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.969640 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrvxd\" (UniqueName: \"kubernetes.io/projected/11a0fa78-3646-42ca-a01a-8d93d78d669e-kube-api-access-wrvxd\") pod \"cluster-samples-operator-665b6dd947-xgspc\" (UID: \"11a0fa78-3646-42ca-a01a-8d93d78d669e\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.991718 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cbwk\" (UniqueName: \"kubernetes.io/projected/dc247eab-6778-41d7-a69d-c551c989814e-kube-api-access-9cbwk\") pod \"authentication-operator-69f744f599-k4c6c\" (UID: \"dc247eab-6778-41d7-a69d-c551c989814e\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:05 crc kubenswrapper[4948]: I0120 19:51:05.993688 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.005118 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.019154 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdwm4\" (UniqueName: \"kubernetes.io/projected/4a88cd6c-06ab-471e-b7c1-e87b957e4392-kube-api-access-mdwm4\") pod \"machine-approver-56656f9798-ng8r8\" (UID: \"4a88cd6c-06ab-471e-b7c1-e87b957e4392\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.031155 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7g2c\" (UniqueName: \"kubernetes.io/projected/fe57b94e-b773-4dc8-9a99-a2217ab4040c-kube-api-access-z7g2c\") pod \"console-f9d7485db-lxvjj\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.062972 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4fjf\" (UniqueName: \"kubernetes.io/projected/ac50a1ff-ffd6-4c97-b685-04d5e9740183-kube-api-access-b4fjf\") pod \"apiserver-7bbb656c7d-zs4jw\" (UID: \"ac50a1ff-ffd6-4c97-b685-04d5e9740183\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.110988 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.112044 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.116150 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdhj8\" (UniqueName: \"kubernetes.io/projected/0d15401f-919f-4d4e-b466-91d2d0125952-kube-api-access-xdhj8\") pod \"dns-operator-744455d44c-d86b9\" (UID: \"0d15401f-919f-4d4e-b466-91d2d0125952\") " pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.130765 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84wlp\" (UniqueName: \"kubernetes.io/projected/fe6d297c-7bfa-4431-9b33-374d4ae3b503-kube-api-access-84wlp\") pod \"console-operator-58897d9998-2gfvd\" (UID: \"fe6d297c-7bfa-4431-9b33-374d4ae3b503\") " pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.132487 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx4pw\" (UniqueName: \"kubernetes.io/projected/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-kube-api-access-hx4pw\") pod \"oauth-openshift-558db77b4-vxm8l\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.132767 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.134265 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.141551 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpbml\" (UniqueName: \"kubernetes.io/projected/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-kube-api-access-cpbml\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.157240 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.242931 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.244618 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.244693 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.244743 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.244770 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.250527 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.256211 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.314339 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.314417 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.314588 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.345348 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.354145 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.355063 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.374189 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.446671 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.476205 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.476462 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.476668 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.477787 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.486098 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.504577 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.536314 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.545785 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.569879 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.570202 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.570226 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.570784 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.573077 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.601745 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.611298 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=2.611280727 podStartE2EDuration="2.611280727s" podCreationTimestamp="2026-01-20 19:51:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:06.609105907 +0000 UTC m=+94.559830876" watchObservedRunningTime="2026-01-20 19:51:06.611280727 +0000 UTC m=+94.562005686" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.622281 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.646374 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 20 19:51:06 crc kubenswrapper[4948]: E0120 19:51:06.646378 4948 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Jan 20 19:51:06 crc kubenswrapper[4948]: E0120 19:51:06.646492 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/487f8971-88dc-4ebe-9d67-3b48284c72f9-config podName:487f8971-88dc-4ebe-9d67-3b48284c72f9 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:07.146458161 +0000 UTC m=+95.097183120 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/487f8971-88dc-4ebe-9d67-3b48284c72f9-config") pod "openshift-apiserver-operator-796bbdcf4f-ts8z9" (UID: "487f8971-88dc-4ebe-9d67-3b48284c72f9") : failed to sync configmap cache: timed out waiting for the condition Jan 20 19:51:06 crc kubenswrapper[4948]: E0120 19:51:06.646733 4948 secret.go:188] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 20 19:51:06 crc kubenswrapper[4948]: E0120 19:51:06.646783 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/487f8971-88dc-4ebe-9d67-3b48284c72f9-serving-cert podName:487f8971-88dc-4ebe-9d67-3b48284c72f9 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:07.14676773 +0000 UTC m=+95.097492699 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/487f8971-88dc-4ebe-9d67-3b48284c72f9-serving-cert") pod "openshift-apiserver-operator-796bbdcf4f-ts8z9" (UID: "487f8971-88dc-4ebe-9d67-3b48284c72f9") : failed to sync secret cache: timed out waiting for the condition Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.658794 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.675273 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.694439 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.710418 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k2czh"] Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.715097 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.739200 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.754454 4948 request.go:700] Waited for 1.010791101s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca-operator/configmaps?fieldSelector=metadata.name%3Dservice-ca-operator-config&limit=500&resourceVersion=0 Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.758223 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.784179 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.799226 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.817772 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" 
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.829766 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"]
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.835489 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Jan 20 19:51:06 crc kubenswrapper[4948]: W0120 19:51:06.854213 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod21157116_8790_4342_ba0d_e356baad7ae1.slice/crio-168ce56662bbbbce72996d545dec4d711bc62bdf444606e3eda248c2859baaf1 WatchSource:0}: Error finding container 168ce56662bbbbce72996d545dec4d711bc62bdf444606e3eda248c2859baaf1: Status 404 returned error can't find the container with id 168ce56662bbbbce72996d545dec4d711bc62bdf444606e3eda248c2859baaf1
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.859022 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.874053 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.881684 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" event={"ID":"337527e2-a869-4df8-988d-66bf559e348d","Type":"ContainerStarted","Data":"dbd241184c6c07a719041cfdce12eab3669a5cd96f7bf6eb9e47b596b99df39f"}
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.894244 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.898902 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" event={"ID":"4a88cd6c-06ab-471e-b7c1-e87b957e4392","Type":"ContainerStarted","Data":"36b47540e204f55a7aa4c028b3db3ce0deb6aea401e43dade086ea892bd7b725"}
Jan 20 19:51:06 crc kubenswrapper[4948]: E0120 19:51:06.905324 4948 projected.go:288] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.914735 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.921389 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" event={"ID":"99ae8982-f499-4219-9a53-8d76189324d5","Type":"ContainerStarted","Data":"cddb5e221c9dc1c7da3c94850094b6c4bddc7e616e70025d83f8b2c9b4f2d58a"}
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.927524 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" event={"ID":"21157116-8790-4342-ba0d-e356baad7ae1","Type":"ContainerStarted","Data":"168ce56662bbbbce72996d545dec4d711bc62bdf444606e3eda248c2859baaf1"}
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.936505 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.957529 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.959056 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"]
Jan 20 19:51:06 crc kubenswrapper[4948]: W0120 19:51:06.966661 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac50a1ff_ffd6_4c97_b685_04d5e9740183.slice/crio-098fb479677f2fbcf26b59b789ab489b783e5782233b28d68ecf97982640cd49 WatchSource:0}: Error finding container 098fb479677f2fbcf26b59b789ab489b783e5782233b28d68ecf97982640cd49: Status 404 returned error can't find the container with id 098fb479677f2fbcf26b59b789ab489b783e5782233b28d68ecf97982640cd49
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.974760 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc"]
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.974976 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 20 19:51:06 crc kubenswrapper[4948]: I0120 19:51:06.997549 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vxm8l"]
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.001267 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.086967 4948 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" secret="" err="failed to sync secret cache: timed out waiting for the condition"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.087059 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx"
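The "SyncLoop (PLEG)" entries above are the Pod Lifecycle Event Generator turning container-runtime state diffs into events ("this container ID now exists") that wake the pod sync loop. A stripped-down relist loop over stand-in types (none of these names are kubelet's; the container ID is a shortened placeholder):

package main

import "fmt"

type event struct{ pod, containerID, kind string }

// relist compares the previous and current container sets per pod and emits
// the delta as events, which is essentially what PLEG does on every poll.
func relist(prev, curr map[string][]string) []event {
	contains := func(list []string, id string) bool {
		for _, v := range list {
			if v == id {
				return true
			}
		}
		return false
	}
	var out []event
	for pod, ids := range curr {
		for _, id := range ids {
			if !contains(prev[pod], id) {
				out = append(out, event{pod, id, "ContainerStarted"})
			}
		}
	}
	for pod, ids := range prev {
		for _, id := range ids {
			if !contains(curr[pod], id) {
				out = append(out, event{pod, id, "ContainerDied"})
			}
		}
	}
	return out
}

func main() {
	prev := map[string][]string{}
	curr := map[string][]string{"openshift-apiserver/apiserver-76f77b778f-k2czh": {"dbd24118..."}}
	for _, e := range relist(prev, curr) {
		fmt.Printf("SyncLoop (PLEG): event for pod %q: %s %s\n", e.pod, e.kind, e.containerID)
	}
}

The manager.go:1169 warnings interleaved above are the benign flip side of the same polling: the cgroup watcher saw a cgroup appear, asked the runtime for the container, and the container had not been fully registered (or was already gone), hence the 404.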
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.088123 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.093695 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.093942 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.094124 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.095392 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.114035 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.141337 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.146920 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-lxvjj"] Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.155568 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.177860 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-2gfvd"] Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.178461 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.190334 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/487f8971-88dc-4ebe-9d67-3b48284c72f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.190390 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/487f8971-88dc-4ebe-9d67-3b48284c72f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.199294 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-k4c6c"] Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.200210 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.217773 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 20 19:51:07 crc kubenswrapper[4948]: W0120 
19:51:07.242871 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe57b94e_b773_4dc8_9a99_a2217ab4040c.slice/crio-26f0b10cf419ac44b9997f8537444c6b33e634e3b8c5ad4afb3a6bdad64761ad WatchSource:0}: Error finding container 26f0b10cf419ac44b9997f8537444c6b33e634e3b8c5ad4afb3a6bdad64761ad: Status 404 returned error can't find the container with id 26f0b10cf419ac44b9997f8537444c6b33e634e3b8c5ad4afb3a6bdad64761ad Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.243328 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.261850 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-d86b9"] Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.290561 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwn92\" (UniqueName: \"kubernetes.io/projected/516ee408-b349-44cd-9ba3-1a486e631818-kube-api-access-gwn92\") pod \"downloads-7954f5f757-9kr4w\" (UID: \"516ee408-b349-44cd-9ba3-1a486e631818\") " pod="openshift-console/downloads-7954f5f757-9kr4w" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.314535 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqdmm\" (UniqueName: \"kubernetes.io/projected/aa3527bc-8d08-4c9a-9349-85d27473d624-kube-api-access-cqdmm\") pod \"openshift-config-operator-7777fb866f-6cqcg\" (UID: \"aa3527bc-8d08-4c9a-9349-85d27473d624\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.316938 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0a10e0e8-3193-4a13-ae0f-4a20c5e854b4-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5rg9m\" (UID: \"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.328911 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a827077f-10f7-4609-93bc-14cd2b7889b4-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-4225h\" (UID: \"a827077f-10f7-4609-93bc-14cd2b7889b4\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.334962 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.354832 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.375572 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.395019 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.415103 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.436326 4948 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.455955 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.469244 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b9nsx"] Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.475110 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.504670 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.521952 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.537955 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.546461 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.555271 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.565567 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-9kr4w" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.579186 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.595552 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.602942 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.616381 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.619889 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.635273 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.655858 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.676970 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.695590 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.730269 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.761452 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.772661 4948 request.go:700] Waited for 1.955609124s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns/secrets?fieldSelector=metadata.name%3Ddns-default-metrics-tls&limit=500&resourceVersion=0 Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.781310 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.818849 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g5vj\" (UniqueName: \"kubernetes.io/projected/666e60ed-f213-4af4-a4a9-969864d1fd0e-kube-api-access-8g5vj\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819226 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-certificates\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819420 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666e60ed-f213-4af4-a4a9-969864d1fd0e-config\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819481 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d9173bf0-5a37-423e-94e7-7496bd69f2ee-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819522 4948 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4pnmq\" (UID: \"203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819547 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d9173bf0-5a37-423e-94e7-7496bd69f2ee-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819598 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/666e60ed-f213-4af4-a4a9-969864d1fd0e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819781 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-647fc\" (UniqueName: \"kubernetes.io/projected/203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3-kube-api-access-647fc\") pod \"control-plane-machine-set-operator-78cbb6b69f-4pnmq\" (UID: \"203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819820 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-trusted-ca\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819940 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzk6g\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-kube-api-access-nzk6g\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.819997 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-bound-sa-token\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.820045 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pprj4\" (UniqueName: \"kubernetes.io/projected/f03e94eb-7658-49ed-a576-5ac4cecfe82c-kube-api-access-pprj4\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.820338 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/666e60ed-f213-4af4-a4a9-969864d1fd0e-images\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.820365 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f03e94eb-7658-49ed-a576-5ac4cecfe82c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.820455 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-tls\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.821200 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.821295 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f03e94eb-7658-49ed-a576-5ac4cecfe82c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.823248 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.906145 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: E0120 19:51:07.908757 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:08.408693925 +0000 UTC m=+96.359418894 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.915323 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.915799 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.917721 4948 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 20 19:51:07 crc kubenswrapper[4948]: E0120 19:51:07.919246 4948 projected.go:288] Couldn't get configMap openshift-apiserver-operator/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 20 19:51:07 crc kubenswrapper[4948]: E0120 19:51:07.920253 4948 projected.go:194] Error preparing data for projected volume kube-api-access-zcp74 for pod openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9: failed to sync configmap cache: timed out waiting for the condition Jan 20 19:51:07 crc kubenswrapper[4948]: E0120 19:51:07.920394 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/487f8971-88dc-4ebe-9d67-3b48284c72f9-kube-api-access-zcp74 podName:487f8971-88dc-4ebe-9d67-3b48284c72f9 nodeName:}" failed. No retries permitted until 2026-01-20 19:51:08.420371756 +0000 UTC m=+96.371096715 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-zcp74" (UniqueName: "kubernetes.io/projected/487f8971-88dc-4ebe-9d67-3b48284c72f9-kube-api-access-zcp74") pod "openshift-apiserver-operator-796bbdcf4f-ts8z9" (UID: "487f8971-88dc-4ebe-9d67-3b48284c72f9") : failed to sync configmap cache: timed out waiting for the condition Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.922852 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.923237 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.923445 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/925c0fbe-bc51-41ee-b496-1a83b01918dd-trusted-ca\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.923490 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rglwv\" (UniqueName: \"kubernetes.io/projected/925c0fbe-bc51-41ee-b496-1a83b01918dd-kube-api-access-rglwv\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.923530 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-registration-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.923581 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d9173bf0-5a37-423e-94e7-7496bd69f2ee-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.926870 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d9173bf0-5a37-423e-94e7-7496bd69f2ee-ca-trust-extracted\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.931092 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/487f8971-88dc-4ebe-9d67-3b48284c72f9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.931632 4948 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.931790 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b4cfc509-9b4a-4239-9a47-d6af6df02b35-certs\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.932071 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-trusted-ca\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.932113 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzk6g\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-kube-api-access-nzk6g\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.932152 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/34a4c701-23f8-4d4e-97c0-7ceeaa229d0f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k4fgt\" (UID: \"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.932187 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-bound-sa-token\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.932253 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pprj4\" (UniqueName: \"kubernetes.io/projected/f03e94eb-7658-49ed-a576-5ac4cecfe82c-kube-api-access-pprj4\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.935913 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1267ed5-1f11-4e42-b538-c6d355855019-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.935960 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4bcq\" (UniqueName: \"kubernetes.io/projected/ac63d066-004a-468f-a63d-48eae71c9111-kube-api-access-s4bcq\") pod \"package-server-manager-789f6589d5-p46fx\" (UID: \"ac63d066-004a-468f-a63d-48eae71c9111\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936033 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-config\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936101 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-srv-cert\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936147 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdsz9\" (UniqueName: \"kubernetes.io/projected/34a4c701-23f8-4d4e-97c0-7ceeaa229d0f-kube-api-access-hdsz9\") pod \"multus-admission-controller-857f4d67dd-k4fgt\" (UID: \"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936246 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4764a2-50ea-421c-9d14-13189740a541-config-volume\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936321 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f03e94eb-7658-49ed-a576-5ac4cecfe82c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936358 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lk9p2\" (UniqueName: \"kubernetes.io/projected/13e58171-7fc1-4feb-bcb5-2737e74615a6-kube-api-access-lk9p2\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936412 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t599m\" (UniqueName: \"kubernetes.io/projected/e860d704-e6b4-4490-8dda-52696e52d75d-kube-api-access-t599m\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936452 4948 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936483 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxmrv\" (UniqueName: \"kubernetes.io/projected/bc3d2e55-288e-4c8c-8a78-cacf02725918-kube-api-access-hxmrv\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936524 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-socket-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936562 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-mountpoint-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936594 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lrmz\" (UniqueName: \"kubernetes.io/projected/d9894924-d73d-4e5f-9a04-bf4c6bed159a-kube-api-access-9lrmz\") pod \"service-ca-operator-777779d784-md5gg\" (UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936619 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2ntp\" (UniqueName: \"kubernetes.io/projected/cf1d582b-c803-4add-9b38-67358e29dd96-kube-api-access-k2ntp\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936648 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35ab84e9-16ce-4c92-b69b-d53854b18979-webhook-cert\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936683 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b4cfc509-9b4a-4239-9a47-d6af6df02b35-node-bootstrap-token\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936729 4948 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v98lr\" (UniqueName: \"kubernetes.io/projected/b4cfc509-9b4a-4239-9a47-d6af6df02b35-kube-api-access-v98lr\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.936896 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-csi-data-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:07 crc kubenswrapper[4948]: E0120 19:51:07.936940 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:08.436910469 +0000 UTC m=+96.387635438 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.938159 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-trusted-ca\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.939722 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.941507 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/487f8971-88dc-4ebe-9d67-3b48284c72f9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.941537 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.941696 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjfqs\" (UniqueName: \"kubernetes.io/projected/31b15d20-e87f-4c55-8109-ead0574ff43d-kube-api-access-rjfqs\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.941875 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-service-ca\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.941950 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ea9e37e3-8bd7-4468-991b-2855d3d3385f-profile-collector-cert\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/925c0fbe-bc51-41ee-b496-1a83b01918dd-bound-sa-token\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942081 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/31b15d20-e87f-4c55-8109-ead0574ff43d-metrics-tls\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942162 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-plugins-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942241 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ea9e37e3-8bd7-4468-991b-2855d3d3385f-srv-cert\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942306 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-client\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942371 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d9894924-d73d-4e5f-9a04-bf4c6bed159a-serving-cert\") pod \"service-ca-operator-777779d784-md5gg\" (UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942451 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/ac63d066-004a-468f-a63d-48eae71c9111-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-p46fx\" (UID: \"ac63d066-004a-468f-a63d-48eae71c9111\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942540 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a15f8225-8436-459c-909a-dcc98d5d35fb-cert\") pod \"ingress-canary-8sf9d\" (UID: \"a15f8225-8436-459c-909a-dcc98d5d35fb\") " pod="openshift-ingress-canary/ingress-canary-8sf9d"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942627 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/35ab84e9-16ce-4c92-b69b-d53854b18979-tmpfs\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942717 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-certificates\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942794 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjzhv\" (UniqueName: \"kubernetes.io/projected/dcc77a74-fa21-4f82-af61-42c73086f4a8-kube-api-access-mjzhv\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942870 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cf1d582b-c803-4add-9b38-67358e29dd96-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.942980 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666e60ed-f213-4af4-a4a9-969864d1fd0e-config\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943069 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4pnmq\" (UID: \"203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943144 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4764a2-50ea-421c-9d14-13189740a541-secret-volume\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943257 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d9173bf0-5a37-423e-94e7-7496bd69f2ee-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943355 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/666e60ed-f213-4af4-a4a9-969864d1fd0e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943427 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-647fc\" (UniqueName: \"kubernetes.io/projected/203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3-kube-api-access-647fc\") pod \"control-plane-machine-set-operator-78cbb6b69f-4pnmq\" (UID: \"203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943494 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f4lh\" (UniqueName: \"kubernetes.io/projected/0d4764a2-50ea-421c-9d14-13189740a541-kube-api-access-6f4lh\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943563 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc3d2e55-288e-4c8c-8a78-cacf02725918-serving-cert\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943641 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdfk2\" (UniqueName: \"kubernetes.io/projected/35ab84e9-16ce-4c92-b69b-d53854b18979-kube-api-access-mdfk2\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f"
Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943739 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" 
(UniqueName: \"kubernetes.io/secret/e860d704-e6b4-4490-8dda-52696e52d75d-proxy-tls\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943830 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt6r4\" (UniqueName: \"kubernetes.io/projected/15db69a5-93e7-4777-b31a-800760048d6e-kube-api-access-pt6r4\") pod \"migrator-59844c95c7-l48rg\" (UID: \"15db69a5-93e7-4777-b31a-800760048d6e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.943954 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1267ed5-1f11-4e42-b538-c6d355855019-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944029 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kzp5\" (UniqueName: \"kubernetes.io/projected/ea9e37e3-8bd7-4468-991b-2855d3d3385f-kube-api-access-5kzp5\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944105 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/13e58171-7fc1-4feb-bcb5-2737e74615a6-signing-key\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944173 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35ab84e9-16ce-4c92-b69b-d53854b18979-apiservice-cert\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944262 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/666e60ed-f213-4af4-a4a9-969864d1fd0e-images\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944335 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: \"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944409 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-volume\" (UniqueName: \"kubernetes.io/configmap/31b15d20-e87f-4c55-8109-ead0574ff43d-config-volume\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944492 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1267ed5-1f11-4e42-b538-c6d355855019-config\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944584 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-tls\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944661 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cf1d582b-c803-4add-9b38-67358e29dd96-proxy-tls\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944765 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-ca\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944873 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ftbm\" (UniqueName: \"kubernetes.io/projected/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-kube-api-access-2ftbm\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.944958 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: \"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945034 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcc77a74-fa21-4f82-af61-42c73086f4a8-service-ca-bundle\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945111 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945186 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945258 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: \"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945323 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/13e58171-7fc1-4feb-bcb5-2737e74615a6-signing-cabundle\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945400 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f03e94eb-7658-49ed-a576-5ac4cecfe82c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945468 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/dcc77a74-fa21-4f82-af61-42c73086f4a8-default-certificate\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945540 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xp22\" (UniqueName: \"kubernetes.io/projected/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-kube-api-access-2xp22\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945610 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llsjh\" (UniqueName: \"kubernetes.io/projected/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-kube-api-access-llsjh\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945677 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e860d704-e6b4-4490-8dda-52696e52d75d-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945768 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g5vj\" (UniqueName: \"kubernetes.io/projected/666e60ed-f213-4af4-a4a9-969864d1fd0e-kube-api-access-8g5vj\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.945854 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/925c0fbe-bc51-41ee-b496-1a83b01918dd-metrics-tls\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.951932 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cf1d582b-c803-4add-9b38-67358e29dd96-images\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.952001 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4w5b\" (UniqueName: \"kubernetes.io/projected/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-kube-api-access-j4w5b\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.952041 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9894924-d73d-4e5f-9a04-bf4c6bed159a-config\") pod \"service-ca-operator-777779d784-md5gg\" (UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.952076 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcdvv\" (UniqueName: \"kubernetes.io/projected/a15f8225-8436-459c-909a-dcc98d5d35fb-kube-api-access-bcdvv\") pod \"ingress-canary-8sf9d\" (UID: \"a15f8225-8436-459c-909a-dcc98d5d35fb\") " pod="openshift-ingress-canary/ingress-canary-8sf9d" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.952133 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.952170 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dcc77a74-fa21-4f82-af61-42c73086f4a8-metrics-certs\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.953075 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/666e60ed-f213-4af4-a4a9-969864d1fd0e-config\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.957394 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f03e94eb-7658-49ed-a576-5ac4cecfe82c-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.958476 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/666e60ed-f213-4af4-a4a9-969864d1fd0e-images\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.959577 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.962543 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-tls\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: E0120 19:51:07.964640 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:08.464576278 +0000 UTC m=+96.415301247 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.968063 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" event={"ID":"0d15401f-919f-4d4e-b466-91d2d0125952","Type":"ContainerStarted","Data":"b60327f60dfc60362445db69281d34cda40f0b2b15274c6d271f721b3a120f43"} Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.970601 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4pnmq\" (UID: \"203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.970795 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-certificates\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.971193 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" event={"ID":"21157116-8790-4342-ba0d-e356baad7ae1","Type":"ContainerStarted","Data":"3719c0e71f9240fa1325a50866f37766f7e6d0a426cdf00678035e77268df85c"} Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.972515 4948 generic.go:334] "Generic (PLEG): container finished" podID="ac50a1ff-ffd6-4c97-b685-04d5e9740183" containerID="637c75bd8bca1ab7249911f704ef64e8c43a94b60e0fbdcf9fc57023b2d3595d" exitCode=0 Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.973283 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.973387 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" event={"ID":"ac50a1ff-ffd6-4c97-b685-04d5e9740183","Type":"ContainerDied","Data":"637c75bd8bca1ab7249911f704ef64e8c43a94b60e0fbdcf9fc57023b2d3595d"} Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.973477 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" event={"ID":"ac50a1ff-ffd6-4c97-b685-04d5e9740183","Type":"ContainerStarted","Data":"098fb479677f2fbcf26b59b789ab489b783e5782233b28d68ecf97982640cd49"} Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.976117 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.980206 4948 generic.go:334] "Generic (PLEG): container finished" podID="337527e2-a869-4df8-988d-66bf559e348d" 
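
The E-level record above is the one genuine failure in this stretch: MountDevice for the image registry's PVC cannot proceed because the kubevirt.io.hostpath-provisioner CSI driver has not yet registered with kubelet; the csi-hostpathplugin-pkc9x pod that provides it is still being set up in the surrounding records, and registration happens over the registration-dir socket mounted further down. Rather than blocking, kubelet parks the operation in nestedpendingoperations with a growing backoff, which is what produces the "No retries permitted until ... (durationBeforeRetry 500ms)" wording. A sketch of that bookkeeping, assuming an initial 500ms delay that doubles per failure up to a cap (the constants and names here are illustrative, not kubelet's actual code):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    const (
        initialBackoff = 500 * time.Millisecond // matches "durationBeforeRetry 500ms" in the log
        maxBackoff     = 2 * time.Minute        // assumed cap for this sketch
    )

    // backoff tracks per-operation retry state, one instance per volume operation.
    type backoff struct {
        lastErrorTime time.Time
        duration      time.Duration
    }

    // recordError notes the failure and widens the retry window.
    func (b *backoff) recordError(err error) {
        b.lastErrorTime = time.Now()
        if b.duration == 0 {
            b.duration = initialBackoff
        } else {
            b.duration *= 2
            if b.duration > maxBackoff {
                b.duration = maxBackoff
            }
        }
        fmt.Printf("Operation failed. No retries permitted until %s (durationBeforeRetry %s).\nError: %v\n",
            b.lastErrorTime.Add(b.duration).Format("2006-01-02 15:04:05"), b.duration, err)
    }

    // retryAllowed reports whether the backoff window has elapsed.
    func (b *backoff) retryAllowed() bool {
        return time.Now().After(b.lastErrorTime.Add(b.duration))
    }

    func main() {
        var b backoff
        b.recordError(errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers"))
        fmt.Println("retry allowed immediately?", b.retryAllowed()) // false for the next 500ms
    }

The matching UnmountVolume.TearDown failure later in this log fails for the same reason, and both operations succeed on retry once the plugin has registered.
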
containerID="6c592b6fa924f39fa4dd0d518d341d5a7c555723af80ca71e01a0c7e8f8ce4ec" exitCode=0 Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.980268 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" event={"ID":"337527e2-a869-4df8-988d-66bf559e348d","Type":"ContainerDied","Data":"6c592b6fa924f39fa4dd0d518d341d5a7c555723af80ca71e01a0c7e8f8ce4ec"} Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.984971 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/666e60ed-f213-4af4-a4a9-969864d1fd0e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.987715 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lxvjj" event={"ID":"fe57b94e-b773-4dc8-9a99-a2217ab4040c","Type":"ContainerStarted","Data":"77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf"} Jan 20 19:51:07 crc kubenswrapper[4948]: I0120 19:51:07.987829 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lxvjj" event={"ID":"fe57b94e-b773-4dc8-9a99-a2217ab4040c","Type":"ContainerStarted","Data":"26f0b10cf419ac44b9997f8537444c6b33e634e3b8c5ad4afb3a6bdad64761ad"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.012629 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.016964 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f03e94eb-7658-49ed-a576-5ac4cecfe82c-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.040221 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.040621 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d9173bf0-5a37-423e-94e7-7496bd69f2ee-installation-pull-secrets\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053419 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053647 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-config\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 
19:51:08.053687 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lk9p2\" (UniqueName: \"kubernetes.io/projected/13e58171-7fc1-4feb-bcb5-2737e74615a6-kube-api-access-lk9p2\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053728 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-srv-cert\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053745 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdsz9\" (UniqueName: \"kubernetes.io/projected/34a4c701-23f8-4d4e-97c0-7ceeaa229d0f-kube-api-access-hdsz9\") pod \"multus-admission-controller-857f4d67dd-k4fgt\" (UID: \"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053768 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4764a2-50ea-421c-9d14-13189740a541-config-volume\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053783 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t599m\" (UniqueName: \"kubernetes.io/projected/e860d704-e6b4-4490-8dda-52696e52d75d-kube-api-access-t599m\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053821 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053836 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxmrv\" (UniqueName: \"kubernetes.io/projected/bc3d2e55-288e-4c8c-8a78-cacf02725918-kube-api-access-hxmrv\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053866 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-socket-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053882 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: 
\"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-mountpoint-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053897 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lrmz\" (UniqueName: \"kubernetes.io/projected/d9894924-d73d-4e5f-9a04-bf4c6bed159a-kube-api-access-9lrmz\") pod \"service-ca-operator-777779d784-md5gg\" (UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053912 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2ntp\" (UniqueName: \"kubernetes.io/projected/cf1d582b-c803-4add-9b38-67358e29dd96-kube-api-access-k2ntp\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053928 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35ab84e9-16ce-4c92-b69b-d53854b18979-webhook-cert\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053944 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b4cfc509-9b4a-4239-9a47-d6af6df02b35-node-bootstrap-token\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053958 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v98lr\" (UniqueName: \"kubernetes.io/projected/b4cfc509-9b4a-4239-9a47-d6af6df02b35-kube-api-access-v98lr\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053976 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-csi-data-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.053994 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054019 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/dcc77a74-fa21-4f82-af61-42c73086f4a8-stats-auth\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " 
pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054040 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjfqs\" (UniqueName: \"kubernetes.io/projected/31b15d20-e87f-4c55-8109-ead0574ff43d-kube-api-access-rjfqs\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054056 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-service-ca\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054092 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ea9e37e3-8bd7-4468-991b-2855d3d3385f-profile-collector-cert\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054123 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-plugins-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054143 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/925c0fbe-bc51-41ee-b496-1a83b01918dd-bound-sa-token\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054159 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/31b15d20-e87f-4c55-8109-ead0574ff43d-metrics-tls\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054174 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ea9e37e3-8bd7-4468-991b-2855d3d3385f-srv-cert\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054189 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-client\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054205 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d9894924-d73d-4e5f-9a04-bf4c6bed159a-serving-cert\") pod \"service-ca-operator-777779d784-md5gg\" 
(UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054223 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/ac63d066-004a-468f-a63d-48eae71c9111-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-p46fx\" (UID: \"ac63d066-004a-468f-a63d-48eae71c9111\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054245 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a15f8225-8436-459c-909a-dcc98d5d35fb-cert\") pod \"ingress-canary-8sf9d\" (UID: \"a15f8225-8436-459c-909a-dcc98d5d35fb\") " pod="openshift-ingress-canary/ingress-canary-8sf9d" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054270 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/35ab84e9-16ce-4c92-b69b-d53854b18979-tmpfs\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054287 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjzhv\" (UniqueName: \"kubernetes.io/projected/dcc77a74-fa21-4f82-af61-42c73086f4a8-kube-api-access-mjzhv\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054319 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cf1d582b-c803-4add-9b38-67358e29dd96-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054360 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4764a2-50ea-421c-9d14-13189740a541-secret-volume\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054402 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f4lh\" (UniqueName: \"kubernetes.io/projected/0d4764a2-50ea-421c-9d14-13189740a541-kube-api-access-6f4lh\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054437 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc3d2e55-288e-4c8c-8a78-cacf02725918-serving-cert\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054452 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdfk2\" (UniqueName: \"kubernetes.io/projected/35ab84e9-16ce-4c92-b69b-d53854b18979-kube-api-access-mdfk2\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054471 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e860d704-e6b4-4490-8dda-52696e52d75d-proxy-tls\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054509 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pt6r4\" (UniqueName: \"kubernetes.io/projected/15db69a5-93e7-4777-b31a-800760048d6e-kube-api-access-pt6r4\") pod \"migrator-59844c95c7-l48rg\" (UID: \"15db69a5-93e7-4777-b31a-800760048d6e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054526 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1267ed5-1f11-4e42-b538-c6d355855019-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054542 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kzp5\" (UniqueName: \"kubernetes.io/projected/ea9e37e3-8bd7-4468-991b-2855d3d3385f-kube-api-access-5kzp5\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054557 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/13e58171-7fc1-4feb-bcb5-2737e74615a6-signing-key\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054575 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35ab84e9-16ce-4c92-b69b-d53854b18979-apiservice-cert\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054599 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1267ed5-1f11-4e42-b538-c6d355855019-config\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054616 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: \"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054631 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31b15d20-e87f-4c55-8109-ead0574ff43d-config-volume\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054648 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cf1d582b-c803-4add-9b38-67358e29dd96-proxy-tls\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054677 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-ca\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054696 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ftbm\" (UniqueName: \"kubernetes.io/projected/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-kube-api-access-2ftbm\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054731 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: \"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054758 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcc77a74-fa21-4f82-af61-42c73086f4a8-service-ca-bundle\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054782 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054797 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: 
\"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054814 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/13e58171-7fc1-4feb-bcb5-2737e74615a6-signing-cabundle\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054830 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/dcc77a74-fa21-4f82-af61-42c73086f4a8-default-certificate\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054857 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xp22\" (UniqueName: \"kubernetes.io/projected/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-kube-api-access-2xp22\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054873 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llsjh\" (UniqueName: \"kubernetes.io/projected/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-kube-api-access-llsjh\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054896 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e860d704-e6b4-4490-8dda-52696e52d75d-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054918 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/925c0fbe-bc51-41ee-b496-1a83b01918dd-metrics-tls\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054949 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cf1d582b-c803-4add-9b38-67358e29dd96-images\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054966 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4w5b\" (UniqueName: \"kubernetes.io/projected/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-kube-api-access-j4w5b\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.054984 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcdvv\" (UniqueName: \"kubernetes.io/projected/a15f8225-8436-459c-909a-dcc98d5d35fb-kube-api-access-bcdvv\") pod \"ingress-canary-8sf9d\" (UID: \"a15f8225-8436-459c-909a-dcc98d5d35fb\") " pod="openshift-ingress-canary/ingress-canary-8sf9d" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055001 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9894924-d73d-4e5f-9a04-bf4c6bed159a-config\") pod \"service-ca-operator-777779d784-md5gg\" (UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055046 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055062 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dcc77a74-fa21-4f82-af61-42c73086f4a8-metrics-certs\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055078 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/925c0fbe-bc51-41ee-b496-1a83b01918dd-trusted-ca\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055096 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rglwv\" (UniqueName: \"kubernetes.io/projected/925c0fbe-bc51-41ee-b496-1a83b01918dd-kube-api-access-rglwv\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055120 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-registration-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055159 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055185 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" 
(UniqueName: \"kubernetes.io/secret/b4cfc509-9b4a-4239-9a47-d6af6df02b35-certs\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055215 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/34a4c701-23f8-4d4e-97c0-7ceeaa229d0f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k4fgt\" (UID: \"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055250 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1267ed5-1f11-4e42-b538-c6d355855019-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.055266 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4bcq\" (UniqueName: \"kubernetes.io/projected/ac63d066-004a-468f-a63d-48eae71c9111-kube-api-access-s4bcq\") pod \"package-server-manager-789f6589d5-p46fx\" (UID: \"ac63d066-004a-468f-a63d-48eae71c9111\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" Jan 20 19:51:08 crc kubenswrapper[4948]: E0120 19:51:08.056305 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:08.556284731 +0000 UTC m=+96.507009700 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.057435 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-config\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.099911 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e860d704-e6b4-4490-8dda-52696e52d75d-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.112840 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" event={"ID":"c22d8773-24ca-45ba-95b2-375bb9ccc6bb","Type":"ContainerStarted","Data":"2ea83b3ba47b15b86978e3b6f1fe7d9be80fa6215281bdf3ca10c701c717a4df"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.112891 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" event={"ID":"c22d8773-24ca-45ba-95b2-375bb9ccc6bb","Type":"ContainerStarted","Data":"0f120ebd3be471a6e842b191a142ca11ce8934534eea857340af169658813ea2"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.114242 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-plugins-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.115766 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4764a2-50ea-421c-9d14-13189740a541-config-volume\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.116212 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.119765 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.120172 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.124567 4948 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-b9nsx container/controller-manager 
namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.124628 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" podUID="c22d8773-24ca-45ba-95b2-375bb9ccc6bb" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.126487 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-registration-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.130550 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-socket-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.130602 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-mountpoint-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.136203 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/35ab84e9-16ce-4c92-b69b-d53854b18979-tmpfs\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.137198 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/cf1d582b-c803-4add-9b38-67358e29dd96-images\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.137534 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d1267ed5-1f11-4e42-b538-c6d355855019-config\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.138268 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-ca\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.140260 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/d9894924-d73d-4e5f-9a04-bf4c6bed159a-config\") pod \"service-ca-operator-777779d784-md5gg\" (UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.145420 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.145622 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.145753 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.147240 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/13e58171-7fc1-4feb-bcb5-2737e74615a6-signing-key\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.148838 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.150631 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.159056 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/ea9e37e3-8bd7-4468-991b-2855d3d3385f-srv-cert\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.160889 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.161781 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cf1d582b-c803-4add-9b38-67358e29dd96-auth-proxy-config\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.166433 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.166579 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/ea9e37e3-8bd7-4468-991b-2855d3d3385f-profile-collector-cert\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:08 crc kubenswrapper[4948]: E0120 19:51:08.166749 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:08.666735879 +0000 UTC m=+96.617460848 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.168279 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: \"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.168992 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/13e58171-7fc1-4feb-bcb5-2737e74615a6-signing-cabundle\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.170150 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dcc77a74-fa21-4f82-af61-42c73086f4a8-metrics-certs\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.170445 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcc77a74-fa21-4f82-af61-42c73086f4a8-service-ca-bundle\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.171095 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a15f8225-8436-459c-909a-dcc98d5d35fb-cert\") pod \"ingress-canary-8sf9d\" (UID: \"a15f8225-8436-459c-909a-dcc98d5d35fb\") " pod="openshift-ingress-canary/ingress-canary-8sf9d" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.171491 4948 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/925c0fbe-bc51-41ee-b496-1a83b01918dd-trusted-ca\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.171553 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-srv-cert\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.171889 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.171963 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-csi-data-dir\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.172433 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/31b15d20-e87f-4c55-8109-ead0574ff43d-config-volume\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.174520 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/31b15d20-e87f-4c55-8109-ead0574ff43d-metrics-tls\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.175077 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b4cfc509-9b4a-4239-9a47-d6af6df02b35-node-bootstrap-token\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.176390 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/35ab84e9-16ce-4c92-b69b-d53854b18979-webhook-cert\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.183555 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-service-ca\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 
19:51:08.229377 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/dcc77a74-fa21-4f82-af61-42c73086f4a8-default-certificate\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.236285 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/ac63d066-004a-468f-a63d-48eae71c9111-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-p46fx\" (UID: \"ac63d066-004a-468f-a63d-48eae71c9111\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.236763 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" event={"ID":"4a88cd6c-06ab-471e-b7c1-e87b957e4392","Type":"ContainerStarted","Data":"891f7b95b70e0d4a068e5e569f635d80b2f6a3b6f74eb8d0b2b988874b6556f6"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.245668 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" event={"ID":"4a88cd6c-06ab-471e-b7c1-e87b957e4392","Type":"ContainerStarted","Data":"b4e587e1bdc61756393aa8dbbd064c81bb13f741433179776dd9e64f801eb4e7"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.245285 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b4cfc509-9b4a-4239-9a47-d6af6df02b35-certs\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.259217 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" event={"ID":"11a0fa78-3646-42ca-a01a-8d93d78d669e","Type":"ContainerStarted","Data":"68aeb01a5f5242c0cfccd7e28f1e3c7d4a28792dadd2f3ee906343a1e4fbf1d3"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.259286 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" event={"ID":"11a0fa78-3646-42ca-a01a-8d93d78d669e","Type":"ContainerStarted","Data":"6bca589bac845bb02190faa23f0a028560bb4e844d7c48b4fe7fc5701a3299a5"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.259322 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" event={"ID":"11a0fa78-3646-42ca-a01a-8d93d78d669e","Type":"ContainerStarted","Data":"660d6c37181c1616290a0b5382e54edb292ae06c1d8c7f376fea2fd5cbbba583"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.262663 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" event={"ID":"65a093ae-de0d-4938-9fe8-ba43c4b3eef0","Type":"ContainerStarted","Data":"d16b9bf027baa151c3deefa2434cbe49f94c835bc3c58ab2f402ae916429a9b1"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.262712 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" 
event={"ID":"65a093ae-de0d-4938-9fe8-ba43c4b3eef0","Type":"ContainerStarted","Data":"d75d9c8131bcf2d382557aa61e598740ff2a71289e8d5c223ba41f5b6749d6e0"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.263885 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.265747 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" event={"ID":"dc247eab-6778-41d7-a69d-c551c989814e","Type":"ContainerStarted","Data":"454f239c184b8d8a5ad002291e08a621765ae77cd5baa6ffa26e562e1340c332"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.265782 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" event={"ID":"dc247eab-6778-41d7-a69d-c551c989814e","Type":"ContainerStarted","Data":"3db7b66d35188f2f450a7598a124aa235b5d6ca3fd2f9e2651a9d2d4ea9bdabc"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.270072 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-2gfvd" event={"ID":"fe6d297c-7bfa-4431-9b33-374d4ae3b503","Type":"ContainerStarted","Data":"0a2e9d5f26385967890693935e50199d5a32634bd3b3d552a65375f1b034d01e"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.270114 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-2gfvd" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.270125 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-2gfvd" event={"ID":"fe6d297c-7bfa-4431-9b33-374d4ae3b503","Type":"ContainerStarted","Data":"3becd37079fb2f0c0caacf87c2781d04243c500a09585be8e5719d8e40f580b1"} Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.271094 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:08 crc kubenswrapper[4948]: E0120 19:51:08.271353 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:08.771329286 +0000 UTC m=+96.722054255 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.273353 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.292910 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: \"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.333034 4948 patch_prober.go:28] interesting pod/console-operator-58897d9998-2gfvd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body= Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.333641 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-2gfvd" podUID="fe6d297c-7bfa-4431-9b33-374d4ae3b503" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.333130 4948 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vxm8l container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.333772 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.335109 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/bc3d2e55-288e-4c8c-8a78-cacf02725918-etcd-client\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: E0120 19:51:08.361363 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-20 19:51:08.861313583 +0000 UTC m=+96.812038552 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.364957 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/34a4c701-23f8-4d4e-97c0-7ceeaa229d0f-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-k4fgt\" (UID: \"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.366208 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/925c0fbe-bc51-41ee-b496-1a83b01918dd-metrics-tls\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.366806 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/35ab84e9-16ce-4c92-b69b-d53854b18979-apiservice-cert\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.367786 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4764a2-50ea-421c-9d14-13189740a541-secret-volume\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.368992 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e860d704-e6b4-4490-8dda-52696e52d75d-proxy-tls\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.378697 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:08 crc kubenswrapper[4948]: E0120 19:51:08.382023 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:08.881997971 +0000 UTC m=+96.832722950 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.430360 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.430910 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzk6g\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-kube-api-access-nzk6g\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.435882 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d9894924-d73d-4e5f-9a04-bf4c6bed159a-serving-cert\") pod \"service-ca-operator-777779d784-md5gg\" (UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.440292 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/dcc77a74-fa21-4f82-af61-42c73086f4a8-stats-auth\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.440504 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d1267ed5-1f11-4e42-b538-c6d355855019-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.447465 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cf1d582b-c803-4add-9b38-67358e29dd96-proxy-tls\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.507103 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcp74\" (UniqueName: \"kubernetes.io/projected/487f8971-88dc-4ebe-9d67-3b48284c72f9-kube-api-access-zcp74\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.507205 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:08 crc kubenswrapper[4948]: E0120 19:51:08.507695 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:09.007684256 +0000 UTC m=+96.958409225 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.529504 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcp74\" (UniqueName: \"kubernetes.io/projected/487f8971-88dc-4ebe-9d67-3b48284c72f9-kube-api-access-zcp74\") pod \"openshift-apiserver-operator-796bbdcf4f-ts8z9\" (UID: \"487f8971-88dc-4ebe-9d67-3b48284c72f9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.546624 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc3d2e55-288e-4c8c-8a78-cacf02725918-serving-cert\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.688299 4948 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.847475 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.849498 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt6r4\" (UniqueName: \"kubernetes.io/projected/15db69a5-93e7-4777-b31a-800760048d6e-kube-api-access-pt6r4\") pod \"migrator-59844c95c7-l48rg\" (UID: \"15db69a5-93e7-4777-b31a-800760048d6e\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.872104 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/925c0fbe-bc51-41ee-b496-1a83b01918dd-bound-sa-token\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.874473 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.876587 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdsz9\" (UniqueName: \"kubernetes.io/projected/34a4c701-23f8-4d4e-97c0-7ceeaa229d0f-kube-api-access-hdsz9\") pod \"multus-admission-controller-857f4d67dd-k4fgt\" (UID: \"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" Jan 20 19:51:08 crc kubenswrapper[4948]: E0120 19:51:08.898527 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:09.39849282 +0000 UTC m=+97.349217789 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.985126 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:08 crc kubenswrapper[4948]: E0120 19:51:08.986175 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:09.486153424 +0000 UTC m=+97.436878393 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.987141 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-bound-sa-token\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.987962 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.988646 4948 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-ltp2j container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 20 19:51:08 crc kubenswrapper[4948]: I0120 19:51:08.988724 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" podUID="21157116-8790-4342-ba0d-e356baad7ae1" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.046090 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lk9p2\" (UniqueName: \"kubernetes.io/projected/13e58171-7fc1-4feb-bcb5-2737e74615a6-kube-api-access-lk9p2\") pod \"service-ca-9c57cc56f-jcvk4\" (UID: \"13e58171-7fc1-4feb-bcb5-2737e74615a6\") " pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.059685 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t599m\" (UniqueName: \"kubernetes.io/projected/e860d704-e6b4-4490-8dda-52696e52d75d-kube-api-access-t599m\") pod \"machine-config-controller-84d6567774-5dsv5\" (UID: \"e860d704-e6b4-4490-8dda-52696e52d75d\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.069901 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.070002 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.069912 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.090400 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:09 crc kubenswrapper[4948]: E0120 19:51:09.091074 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:09.591057729 +0000 UTC m=+97.541782698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.280373 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdfk2\" (UniqueName: \"kubernetes.io/projected/35ab84e9-16ce-4c92-b69b-d53854b18979-kube-api-access-mdfk2\") pod \"packageserver-d55dfcdfc-wzh2f\" (UID: \"35ab84e9-16ce-4c92-b69b-d53854b18979\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.283219 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjzhv\" (UniqueName: \"kubernetes.io/projected/dcc77a74-fa21-4f82-af61-42c73086f4a8-kube-api-access-mjzhv\") pod \"router-default-5444994796-mqlgr\" (UID: \"dcc77a74-fa21-4f82-af61-42c73086f4a8\") " pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.283566 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kzp5\" (UniqueName: \"kubernetes.io/projected/ea9e37e3-8bd7-4468-991b-2855d3d3385f-kube-api-access-5kzp5\") pod \"catalog-operator-68c6474976-8g7vp\" (UID: \"ea9e37e3-8bd7-4468-991b-2855d3d3385f\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.290728 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xp22\" (UniqueName: \"kubernetes.io/projected/2aae7ee8-ddec-4fce-bfa0-39e13d9135cd-kube-api-access-2xp22\") pod \"kube-storage-version-migrator-operator-b67b599dd-dczh4\" (UID: \"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.291370 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-647fc\" (UniqueName: \"kubernetes.io/projected/203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3-kube-api-access-647fc\") pod \"control-plane-machine-set-operator-78cbb6b69f-4pnmq\" (UID: \"203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" 
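
Note on the recurring errors in this window: every nestedpendingoperations.go:348 failure above traces to the same root cause. The image-registry pod's PVC (pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8) is backed by the kubevirt.io.hostpath-provisioner CSI driver, whose node plugin (csi-hostpathplugin-pkc9x) is itself still being started by this same kubelet, so the driver name is not yet in the kubelet's registered-drivers list. The kubelet does not block on this: each MountVolume.MountDevice / UnmountVolume.TearDown attempt fails fast and is re-queued with a short backoff (durationBeforeRetry 500ms), which is why the identical error repeats every few hundred milliseconds until the plugin registers. The readiness-probe failures (connection refused on 10.217.0.7:8443, 10.217.0.12:6443, 10.217.0.15:8443) are the same startup-ordering story: those containers emitted ContainerStarted moments earlier and are simply not listening yet. The Go sketch below is illustrative only, not kubelet source; registeredDrivers and csiClientFor are invented names modeling the lookup-and-retry behaviour visible in the log.

package main

import (
	"fmt"
	"time"
)

// registeredDrivers stands in for the kubelet's CSI plugin registry. On a
// real node it is populated when a driver's node plugin registers over the
// plugin-registration socket (the "registration-dir" host path mounted for
// csi-hostpathplugin-pkc9x above). Invented for illustration.
var registeredDrivers = map[string]bool{}

// csiClientFor mimics the failing lookup reported as "failed to get CSI
// client" / "failed to create newCsiDriverClient" in the entries above.
func csiClientFor(driver string) error {
	if !registeredDrivers[driver] {
		return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driver)
	}
	return nil
}

func main() {
	const driver = "kubevirt.io.hostpath-provisioner"
	// The log shows a fixed 500ms durationBeforeRetry between attempts; the
	// real reconciler may grow this delay on repeated failures.
	const backoff = 500 * time.Millisecond

	for attempt := 1; ; attempt++ {
		if err := csiClientFor(driver); err != nil {
			fmt.Printf("attempt %d failed: %v; no retries permitted for %v\n", attempt, err, backoff)
			// Simulate the hostpath plugin completing registration while
			// the volume reconciler is backing off.
			if attempt == 3 {
				registeredDrivers[driver] = true
			}
			time.Sleep(backoff)
			continue
		}
		fmt.Printf("attempt %d: mount of the PVC would now proceed\n", attempt)
		return
	}
}

Running the sketch prints three lookup failures and then a success, mirroring how the repeated mount/unmount errors in this log stop on their own once the driver finishes registering; no operator intervention is implied.
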
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.291552 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.291960 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lrmz\" (UniqueName: \"kubernetes.io/projected/d9894924-d73d-4e5f-9a04-bf4c6bed159a-kube-api-access-9lrmz\") pod \"service-ca-operator-777779d784-md5gg\" (UID: \"d9894924-d73d-4e5f-9a04-bf4c6bed159a\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.365978 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d1267ed5-1f11-4e42-b538-c6d355855019-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-85cmp\" (UID: \"d1267ed5-1f11-4e42-b538-c6d355855019\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.366765 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4848a3aa-4912-44e4-a9b3-8b2283a2bd6f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-4vg89\" (UID: \"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.367455 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ftbm\" (UniqueName: \"kubernetes.io/projected/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-kube-api-access-2ftbm\") pod \"marketplace-operator-79b997595-bbslp\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.368201 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcdvv\" (UniqueName: \"kubernetes.io/projected/a15f8225-8436-459c-909a-dcc98d5d35fb-kube-api-access-bcdvv\") pod \"ingress-canary-8sf9d\" (UID: \"a15f8225-8436-459c-909a-dcc98d5d35fb\") " pod="openshift-ingress-canary/ingress-canary-8sf9d" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.370636 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g5vj\" (UniqueName: \"kubernetes.io/projected/666e60ed-f213-4af4-a4a9-969864d1fd0e-kube-api-access-8g5vj\") pod \"machine-api-operator-5694c8668f-hxwlm\" (UID: \"666e60ed-f213-4af4-a4a9-969864d1fd0e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.371312 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxmrv\" (UniqueName: \"kubernetes.io/projected/bc3d2e55-288e-4c8c-8a78-cacf02725918-kube-api-access-hxmrv\") pod \"etcd-operator-b45778765-94v8r\" (UID: \"bc3d2e55-288e-4c8c-8a78-cacf02725918\") " pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.375220 4948 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg"] Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.376558 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.376931 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.377409 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.389979 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-8sf9d" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.421581 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pprj4\" (UniqueName: \"kubernetes.io/projected/f03e94eb-7658-49ed-a576-5ac4cecfe82c-kube-api-access-pprj4\") pod \"openshift-controller-manager-operator-756b6f6bc6-bxbqp\" (UID: \"f03e94eb-7658-49ed-a576-5ac4cecfe82c\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.422613 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:09 crc kubenswrapper[4948]: E0120 19:51:09.427501 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:09.927479463 +0000 UTC m=+97.878204432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.430658 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.500688 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-mqlgr"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.606390 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.621594 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llsjh\" (UniqueName: \"kubernetes.io/projected/c05cd5ea-b0a0-4314-9676-199d2f7edd7c-kube-api-access-llsjh\") pod \"csi-hostpathplugin-pkc9x\" (UID: \"c05cd5ea-b0a0-4314-9676-199d2f7edd7c\") " pod="hostpath-provisioner/csi-hostpathplugin-pkc9x"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.623686 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2ntp\" (UniqueName: \"kubernetes.io/projected/cf1d582b-c803-4add-9b38-67358e29dd96-kube-api-access-k2ntp\") pod \"machine-config-operator-74547568cd-nvgzr\" (UID: \"cf1d582b-c803-4add-9b38-67358e29dd96\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.626381 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4w5b\" (UniqueName: \"kubernetes.io/projected/fbe60f4d-9d85-4eb6-8b54-eba15df5d683-kube-api-access-j4w5b\") pod \"olm-operator-6b444d44fb-sxpf7\" (UID: \"fbe60f4d-9d85-4eb6-8b54-eba15df5d683\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.626492 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rglwv\" (UniqueName: \"kubernetes.io/projected/925c0fbe-bc51-41ee-b496-1a83b01918dd-kube-api-access-rglwv\") pod \"ingress-operator-5b745b69d9-bcvw9\" (UID: \"925c0fbe-bc51-41ee-b496-1a83b01918dd\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.650046 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4bcq\" (UniqueName: \"kubernetes.io/projected/ac63d066-004a-468f-a63d-48eae71c9111-kube-api-access-s4bcq\") pod \"package-server-manager-789f6589d5-p46fx\" (UID: \"ac63d066-004a-468f-a63d-48eae71c9111\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.679319 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjfqs\" (UniqueName: \"kubernetes.io/projected/31b15d20-e87f-4c55-8109-ead0574ff43d-kube-api-access-rjfqs\") pod \"dns-default-5svhh\" (UID: \"31b15d20-e87f-4c55-8109-ead0574ff43d\") " pod="openshift-dns/dns-default-5svhh"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.728280 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v98lr\" (UniqueName: \"kubernetes.io/projected/b4cfc509-9b4a-4239-9a47-d6af6df02b35-kube-api-access-v98lr\") pod \"machine-config-server-62qsd\" (UID: \"b4cfc509-9b4a-4239-9a47-d6af6df02b35\") " pod="openshift-machine-config-operator/machine-config-server-62qsd"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.729924 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f4lh\" (UniqueName: \"kubernetes.io/projected/0d4764a2-50ea-421c-9d14-13189740a541-kube-api-access-6f4lh\") pod \"collect-profiles-29482305-7r5qf\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"
Jan 20 19:51:09 crc kubenswrapper[4948]: E0120 19:51:09.738024 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:10.237955254 +0000 UTC m=+98.188680223 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.816573 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm"
Jan 20 19:51:09 crc kubenswrapper[4948]: I0120 19:51:09.915983 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-pkc9x"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:09.930187 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.010189 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7"
Jan 20 19:51:10 crc kubenswrapper[4948]: E0120 19:51:10.012031 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:10.511994717 +0000 UTC m=+98.462719686 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.012305 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" event={"ID":"0d15401f-919f-4d4e-b466-91d2d0125952","Type":"ContainerStarted","Data":"41019ace3d21cb21f12c892e63840d19247f42221bb5b548cf83fd4d2b6e78d7"}
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.030077 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.041622 4948 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-b9nsx container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.041971 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" podUID="c22d8773-24ca-45ba-95b2-375bb9ccc6bb" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.042064 4948 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vxm8l container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body=
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.042088 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.044384 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.045151 4948 patch_prober.go:28] interesting pod/console-operator-58897d9998-2gfvd container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused" start-of-body=
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.045241 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-2gfvd" podUID="fe6d297c-7bfa-4431-9b33-374d4ae3b503" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.15:8443/readyz\": dial tcp 10.217.0.15:8443: connect: connection refused"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.052421 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.053311 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.055348 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.058125 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.126844 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.147805 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.174829 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:10 crc kubenswrapper[4948]: E0120 19:51:10.194613 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:10.694540952 +0000 UTC m=+98.645265921 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.195422 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:10 crc kubenswrapper[4948]: E0120 19:51:10.208986 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:10.708966868 +0000 UTC m=+98.659691837 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.231846 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.248992 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-5svhh"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.249517 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.271095 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-62qsd"
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.467219 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:10 crc kubenswrapper[4948]: E0120 19:51:10.467954 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:10.967938367 +0000 UTC m=+98.918663336 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.500296 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-9kr4w"]
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.603258 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:10 crc kubenswrapper[4948]: E0120 19:51:10.603998 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:11.103972787 +0000 UTC m=+99.054697756 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:10 crc kubenswrapper[4948]: I0120 19:51:10.743626 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:10 crc kubenswrapper[4948]: E0120 19:51:10.744196 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:11.2441813 +0000 UTC m=+99.194906269 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.206582 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:11 crc kubenswrapper[4948]: E0120 19:51:11.214903 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:11.714881795 +0000 UTC m=+99.665606764 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.332155 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:11 crc kubenswrapper[4948]: E0120 19:51:11.332562 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:11.832548431 +0000 UTC m=+99.783273400 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.355197 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m"]
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.355265 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h"]
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.397136 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" event={"ID":"ac50a1ff-ffd6-4c97-b685-04d5e9740183","Type":"ContainerStarted","Data":"1f03209b4b90e89da7b83d2408ef040533796960832eb19396e0c07d69f48024"}
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.418723 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" event={"ID":"337527e2-a869-4df8-988d-66bf559e348d","Type":"ContainerStarted","Data":"900ac2e2b31c62320d77aaa23571e26858ead00be908b905804a251c45f49df7"}
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.433319 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:11 crc kubenswrapper[4948]: E0120 19:51:11.435379 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:11.93536603 +0000 UTC m=+99.886090989 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.466231 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" event={"ID":"aa3527bc-8d08-4c9a-9349-85d27473d624","Type":"ContainerStarted","Data":"2abbeee839ba6a121b66493616e8c04e5e2d09aae96a531739a3e466905ec5bb"}
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.831256 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:11 crc kubenswrapper[4948]: E0120 19:51:11.831611 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.331596271 +0000 UTC m=+100.282321240 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.883487 4948 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vxm8l container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body=
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.883550 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused"
Jan 20 19:51:11 crc kubenswrapper[4948]: I0120 19:51:11.969614 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:11 crc kubenswrapper[4948]: E0120 19:51:11.977514 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.477493731 +0000 UTC m=+100.428218700 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.083580 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.084014 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.583991721 +0000 UTC m=+100.534716750 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.084172 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.084465 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.584451373 +0000 UTC m=+100.535176342 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.195877 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.198534 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.200118 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.700091624 +0000 UTC m=+100.650816593 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.221727 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.222390 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.722370015 +0000 UTC m=+100.673094984 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.279948 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dbfcfce6-0ab8-40ba-80b2-d391a7dd5418-metrics-certs\") pod \"network-metrics-daemon-h4c6s\" (UID: \"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418\") " pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.308254 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-h4c6s"
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.332297 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.332897 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.832877094 +0000 UTC m=+100.783602063 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.463982 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.464364 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:12.964352969 +0000 UTC m=+100.915077938 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.485083 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4"]
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.622392 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.622481 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.122462443 +0000 UTC m=+101.073187412 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.622864 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.623214 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.123203503 +0000 UTC m=+101.073928472 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.706518 4948 csr.go:261] certificate signing request csr-vf4p6 is approved, waiting to be issued
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.719809 4948 csr.go:257] certificate signing request csr-vf4p6 is issued
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.723505 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.723981 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.223958226 +0000 UTC m=+101.174683195 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.827274 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.827605 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.327593617 +0000 UTC m=+101.278318586 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:12 crc kubenswrapper[4948]: I0120 19:51:12.928689 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:12 crc kubenswrapper[4948]: E0120 19:51:12.929085 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.429070659 +0000 UTC m=+101.379795628 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.030085 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.030643 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.530630363 +0000 UTC m=+101.481355322 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.117174 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" event={"ID":"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4","Type":"ContainerStarted","Data":"c3844dbca0ed3a82c72d02346bb4149703ac4dae2580702d8724984ae32b84dd"}
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.137989 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.138408 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.638394757 +0000 UTC m=+101.589119716 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.142450 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" event={"ID":"0d15401f-919f-4d4e-b466-91d2d0125952","Type":"ContainerStarted","Data":"c657a30b408b0b02499ad5cacdd4087b89ffb3e3d0b27695fbc351cccda24905"}
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.150913 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" event={"ID":"a827077f-10f7-4609-93bc-14cd2b7889b4","Type":"ContainerStarted","Data":"efefab09347eebef2258ff46bdb23da1fb8745c36b868e1c87675557f2527d02"}
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.166674 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-9kr4w" event={"ID":"516ee408-b349-44cd-9ba3-1a486e631818","Type":"ContainerStarted","Data":"8f59fa0759a7f0e14930f627f25e7c11c03aa6f84625ac8decf1d822cd2828df"}
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.270674 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.274277 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.774258572 +0000 UTC m=+101.724983551 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.375060 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.375992 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.875964991 +0000 UTC m=+101.826690000 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.486586 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.486910 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:13.986895612 +0000 UTC m=+101.937620581 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.543012 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-8sf9d"]
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.565005 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9"]
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.587060 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-k4fgt"]
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.588209 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.588624 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.08860463 +0000 UTC m=+102.039329609 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.690100 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.690387 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.19037349 +0000 UTC m=+102.141098459 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.715631 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg"]
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.722237 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-20 19:46:12 +0000 UTC, rotation deadline is 2026-10-25 09:48:11.60320114 +0000 UTC
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.722263 4948 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6661h56m57.880940336s for next certificate rotation
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.742747 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5"]
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.752788 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-mm2q7" podStartSLOduration=82.752765801 podStartE2EDuration="1m22.752765801s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:13.752123273 +0000 UTC m=+101.702848242" watchObservedRunningTime="2026-01-20 19:51:13.752765801 +0000 UTC m=+101.703490780"
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.792055 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.792384 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.292369277 +0000 UTC m=+102.243094236 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.794225 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-jcvk4"]
Jan 20 19:51:13 crc kubenswrapper[4948]: I0120 19:51:13.900105 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:13 crc kubenswrapper[4948]: E0120 19:51:13.922918 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.422891795 +0000 UTC m=+102.373616764 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.001338 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-k4c6c" podStartSLOduration=83.001324535 podStartE2EDuration="1m23.001324535s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.000751339 +0000 UTC m=+101.951476308" watchObservedRunningTime="2026-01-20 19:51:14.001324535 +0000 UTC m=+101.952049504"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.002663 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.002913 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.502882298 +0000 UTC m=+102.453607267 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.104609 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.105028 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.605014058 +0000 UTC m=+102.555739027 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.153544 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" podStartSLOduration=82.153526598 podStartE2EDuration="1m22.153526598s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.152595972 +0000 UTC m=+102.103320931" watchObservedRunningTime="2026-01-20 19:51:14.153526598 +0000 UTC m=+102.104251567"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.227915 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.228641 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.728618836 +0000 UTC m=+102.679343805 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.278142 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" podStartSLOduration=82.278117993 podStartE2EDuration="1m22.278117993s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.271972665 +0000 UTC m=+102.222697634" watchObservedRunningTime="2026-01-20 19:51:14.278117993 +0000 UTC m=+102.228842962"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.293055 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-9kr4w" event={"ID":"516ee408-b349-44cd-9ba3-1a486e631818","Type":"ContainerStarted","Data":"f87a7ddd8644cb5765ad5fa83520610a46f13f626758e69a781983fb72575155"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.306762 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-9kr4w"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.309368 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-62qsd" event={"ID":"b4cfc509-9b4a-4239-9a47-d6af6df02b35","Type":"ContainerStarted","Data":"8bfd0c63f63f265a09c3ce2e0dc03a2a85ea57f43c1e8e8bc4c2643fea6eeaf2"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.319795 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" event={"ID":"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd","Type":"ContainerStarted","Data":"3f5988a90029dcac58a929997a8bb5bcbf7897d4fc8f0a321f5f67e44df48331"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.321904 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-mqlgr" event={"ID":"dcc77a74-fa21-4f82-af61-42c73086f4a8","Type":"ContainerStarted","Data":"6bc368d9385c3d22a3fa19ac1e0f05a2307557ae81292e192efaa8d5645837ed"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.321933 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-mqlgr" event={"ID":"dcc77a74-fa21-4f82-af61-42c73086f4a8","Type":"ContainerStarted","Data":"54b3dfb8487fa61dee57b10ff832016e0e102f1f8c965b867e5c991ad552a970"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.323945 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" event={"ID":"e860d704-e6b4-4490-8dda-52696e52d75d","Type":"ContainerStarted","Data":"63fd4066fe3330b63c4cf9fb2d264c1536763837a6c7babf510dddc336bf8748"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.324576 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" event={"ID":"487f8971-88dc-4ebe-9d67-3b48284c72f9","Type":"ContainerStarted","Data":"4f0ff699856e02dc66888fd55cf0f1e8be148bd8e76188fa16da8828733a0ce8"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.329446 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.329836 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" event={"ID":"15db69a5-93e7-4777-b31a-800760048d6e","Type":"ContainerStarted","Data":"178a1537cfc79c7e0ab963cdbf7876b956ec05c59ef534476cb20c6e24df1e3b"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.338112 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-2gfvd" podStartSLOduration=83.338097848 podStartE2EDuration="1m23.338097848s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.330105369 +0000 UTC m=+102.280830338" watchObservedRunningTime="2026-01-20 19:51:14.338097848 +0000 UTC m=+102.288822817"
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.338375 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.838361305 +0000 UTC m=+102.789086274 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.360041 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" event={"ID":"0a10e0e8-3193-4a13-ae0f-4a20c5e854b4","Type":"ContainerStarted","Data":"975de785eff34c1027c0a351f481e8b69111c08277d88b7f301f2e85fea79581"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.362081 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" event={"ID":"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f","Type":"ContainerStarted","Data":"ef9d365a7484701c34ab5dad43797267716d50f031ea94d4d6ca20517ef020dd"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.362490 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.362533 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.380032 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-8sf9d" event={"ID":"a15f8225-8436-459c-909a-dcc98d5d35fb","Type":"ContainerStarted","Data":"bb3a89aece6bb06a827599810b47ce1b5fd1ab687626fefc4edd9e65a1bf1ae2"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.410365 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" event={"ID":"aa3527bc-8d08-4c9a-9349-85d27473d624","Type":"ContainerStarted","Data":"a44a971c53db1e52f0efc829bf02ffbf4887f5ecd0a42348040dd9f7d9a6b103"}
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.432166 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.432594 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:14.932573818 +0000 UTC m=+102.883298787 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.503608 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.517694 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ng8r8" podStartSLOduration=83.517672481 podStartE2EDuration="1m23.517672481s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.515158892 +0000 UTC m=+102.465883861" watchObservedRunningTime="2026-01-20 19:51:14.517672481 +0000 UTC m=+102.468397450" Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.523839 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-5svhh"] Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.536780 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.538173 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.038153022 +0000 UTC m=+102.988878061 (durationBeforeRetry 500ms). 
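
Every mount and unmount retry above fails for the same reason: kubelet has no registered CSI driver named kubevirt.io.hostpath-provisioner on this node, so it cannot build a CSI client for the volume. Registration normally happens when the driver's node plugin connects over kubelet's plugin-registration socket, after which the driver also appears in the node's CSINode object. A minimal diagnostic sketch in Go (client-go), assuming a kubeconfig at the default location and taking the node name "crc" from the log prefix:

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from $HOME/.kube/config; an assumption for this sketch.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// The CSINode object mirrors kubelet's view of registered node plugins;
	// while it lists no drivers, errors like the ones above are expected.
	csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range csiNode.Spec.Drivers {
		fmt.Printf("registered CSI driver: %s (nodeID %s)\n", d.Name, d.NodeID)
	}
}

Once kubevirt.io.hostpath-provisioner shows up in that list, the retried MountVolume.MountDevice and UnmountVolume.TearDown operations can succeed.
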
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.548320 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-d86b9" podStartSLOduration=83.548303421 podStartE2EDuration="1m23.548303421s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.548065094 +0000 UTC m=+102.498790063" watchObservedRunningTime="2026-01-20 19:51:14.548303421 +0000 UTC m=+102.499028390"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.639495 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.639760 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.139744738 +0000 UTC m=+103.090469707 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.639824 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.641206 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.141198687 +0000 UTC m=+103.091923656 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.706653 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-lxvjj" podStartSLOduration=83.706633761 podStartE2EDuration="1m23.706633761s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.70328394 +0000 UTC m=+102.654008909" watchObservedRunningTime="2026-01-20 19:51:14.706633761 +0000 UTC m=+102.657358730"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.741125 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.742038 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.242011501 +0000 UTC m=+103.192736470 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.845321 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" podStartSLOduration=83.845305933 podStartE2EDuration="1m23.845305933s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.843059421 +0000 UTC m=+102.793784390" watchObservedRunningTime="2026-01-20 19:51:14.845305933 +0000 UTC m=+102.796030892"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.846536 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.846961 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.346946958 +0000 UTC m=+103.297671927 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.911776 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-xgspc" podStartSLOduration=83.911758005 podStartE2EDuration="1m23.911758005s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:14.905910964 +0000 UTC m=+102.856635933" watchObservedRunningTime="2026-01-20 19:51:14.911758005 +0000 UTC m=+102.862482974"
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.947325 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.947553 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.447538376 +0000 UTC m=+103.398263345 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:14 crc kubenswrapper[4948]: I0120 19:51:14.947596 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:14 crc kubenswrapper[4948]: E0120 19:51:14.947951 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.447943397 +0000 UTC m=+103.398668356 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.053160 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.053789 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.553774428 +0000 UTC m=+103.504499397 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.065638 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.065693 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.155990 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" podStartSLOduration=84.15597548 podStartE2EDuration="1m24.15597548s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:15.14906116 +0000 UTC m=+103.099786139" watchObservedRunningTime="2026-01-20 19:51:15.15597548 +0000 UTC m=+103.106700449"
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.157215 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.157566 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.657553043 +0000 UTC m=+103.608278012 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
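
Each failed volume operation is recorded by nestedpendingoperations with a "No retries permitted until" deadline 500ms in the future (durationBeforeRetry), and the reconciler skips the operation until that deadline passes; the m=+103.6... suffix is the monotonic-clock offset since kubelet started. A small illustrative sketch of that gate, assuming a fixed 500ms delay as these records all show (kubelet can also grow the delay for operations that keep failing):

package main

import (
	"fmt"
	"time"
)

// retryGate mimics the "No retries permitted until ..." behaviour above:
// after a failure, the operation is refused until failureTime + delay.
type retryGate struct {
	notBefore time.Time
	delay     time.Duration
}

func (g *retryGate) recordFailure(now time.Time) {
	g.notBefore = now.Add(g.delay)
}

func (g *retryGate) allowed(now time.Time) bool {
	return !now.Before(g.notBefore)
}

func main() {
	g := retryGate{delay: 500 * time.Millisecond} // durationBeforeRetry from the log
	now := time.Now()
	g.recordFailure(now)
	fmt.Println("retry allowed immediately:", g.allowed(now))              // false
	fmt.Println("retry allowed after 500ms:", g.allowed(now.Add(g.delay))) // true
}
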
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.657553043 +0000 UTC m=+103.608278012 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.259292 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.260150 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.759683602 +0000 UTC m=+103.710408571 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.366306 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-mqlgr" podStartSLOduration=83.366285775 podStartE2EDuration="1m23.366285775s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:15.331058719 +0000 UTC m=+103.281783688" watchObservedRunningTime="2026-01-20 19:51:15.366285775 +0000 UTC m=+103.317010734" Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.368926 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.369233 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.869223225 +0000 UTC m=+103.819948194 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.369664 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5rg9m" podStartSLOduration=83.369652897 podStartE2EDuration="1m23.369652897s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:15.368324411 +0000 UTC m=+103.319049400" watchObservedRunningTime="2026-01-20 19:51:15.369652897 +0000 UTC m=+103.320377856" Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.370971 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq"] Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.385721 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp"] Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.409608 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-9kr4w" podStartSLOduration=84.409590592 podStartE2EDuration="1m24.409590592s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:15.409230032 +0000 UTC m=+103.359955001" watchObservedRunningTime="2026-01-20 19:51:15.409590592 +0000 UTC m=+103.360315561" Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.460205 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5svhh" event={"ID":"31b15d20-e87f-4c55-8109-ead0574ff43d","Type":"ContainerStarted","Data":"68af17c39f3d72bbcbd97b1591d24e6e755958e658c965002d96233592b68fbc"} Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.469976 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.470223 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:15.970207854 +0000 UTC m=+103.920932813 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.481228 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" event={"ID":"a827077f-10f7-4609-93bc-14cd2b7889b4","Type":"ContainerStarted","Data":"a097daf29734b1a96fe95d7d540b796d71441c2b6c392f611f09410f58804b82"} Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.515179 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.515228 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.528556 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" event={"ID":"2aae7ee8-ddec-4fce-bfa0-39e13d9135cd","Type":"ContainerStarted","Data":"fccd539a22993f132012080f14c568c42fb42e4a9246a08be923a155d25a139a"} Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.534030 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" event={"ID":"13e58171-7fc1-4feb-bcb5-2737e74615a6","Type":"ContainerStarted","Data":"258a723955adae28d86d408bbeb1a3726c1732166a02300daf0d2d3563f40d4b"} Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.536763 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" event={"ID":"337527e2-a869-4df8-988d-66bf559e348d","Type":"ContainerStarted","Data":"f078957e0678be3db1410dd7c24be7ae6277fd81b0e8ed68b2954428ce0e3ff7"} Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.571137 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.572338 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-4225h" podStartSLOduration=83.572317333 podStartE2EDuration="1m23.572317333s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:15.507057164 +0000 UTC m=+103.457782133" watchObservedRunningTime="2026-01-20 
19:51:15.572317333 +0000 UTC m=+103.523042302" Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.573159 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.073145926 +0000 UTC m=+104.023870895 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.574444 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp"] Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.595543 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-dczh4" podStartSLOduration=83.595524619 podStartE2EDuration="1m23.595524619s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:15.593486093 +0000 UTC m=+103.544211062" watchObservedRunningTime="2026-01-20 19:51:15.595524619 +0000 UTC m=+103.546249588" Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.655285 4948 generic.go:334] "Generic (PLEG): container finished" podID="aa3527bc-8d08-4c9a-9349-85d27473d624" containerID="a44a971c53db1e52f0efc829bf02ffbf4887f5ecd0a42348040dd9f7d9a6b103" exitCode=0 Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.655343 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" event={"ID":"aa3527bc-8d08-4c9a-9349-85d27473d624","Type":"ContainerDied","Data":"a44a971c53db1e52f0efc829bf02ffbf4887f5ecd0a42348040dd9f7d9a6b103"} Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.679353 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.680123 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.180100698 +0000 UTC m=+104.130825667 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.681290 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" podStartSLOduration=84.68127163 podStartE2EDuration="1m24.68127163s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:15.67944868 +0000 UTC m=+103.630173649" watchObservedRunningTime="2026-01-20 19:51:15.68127163 +0000 UTC m=+103.631996599" Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.684353 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f"] Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.780888 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.781472 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.281460307 +0000 UTC m=+104.232185276 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.782753 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbslp"] Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.882621 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.882826 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.382801655 +0000 UTC m=+104.333526624 (durationBeforeRetry 500ms). 
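
Interleaved with the volume retries, the prober keeps reporting startup and readiness failures (connection refused, HTTP 500) for containers that have only just started; these clear on their own once the servers begin listening. A self-contained sketch of an HTTP check of the kind being run here, with the probed URL copied from the downloads-7954f5f757-9kr4w records and a 1s timeout as an assumption:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs a single HTTP GET and reports success or failure detail,
// e.g. "connect: connection refused" as in the prober.go records above.
func probe(url string) (healthy bool, detail string) {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return false, err.Error()
	}
	defer resp.Body.Close()
	// kubelet treats any 2xx or 3xx status as success for HTTP probes.
	return resp.StatusCode >= 200 && resp.StatusCode < 400, resp.Status
}

func main() {
	ok, detail := probe("http://10.217.0.11:8080/")
	fmt.Printf("probeResult=%v output=%q\n", ok, detail)
}
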
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.883166 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.883590 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.383577387 +0000 UTC m=+104.334302356 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.950612 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-62qsd" event={"ID":"b4cfc509-9b4a-4239-9a47-d6af6df02b35","Type":"ContainerStarted","Data":"85e539c0b588d232f822627a7010219f156a38a4cdd67bb1c07c35d46a49c5d0"}
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.972687 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" event={"ID":"f03e94eb-7658-49ed-a576-5ac4cecfe82c","Type":"ContainerStarted","Data":"db01f69373ddd01a9140490b7aaae559b4427654ba3bd1ba3a9191d09ec1821e"}
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.984341 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:15 crc kubenswrapper[4948]: E0120 19:51:15.985457 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.485437439 +0000 UTC m=+104.436162398 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.995177 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:15 crc kubenswrapper[4948]: I0120 19:51:15.995209 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-k2czh"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.000089 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.000130 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.071073 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-62qsd" podStartSLOduration=11.071060136 podStartE2EDuration="11.071060136s" podCreationTimestamp="2026-01-20 19:51:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:16.06935443 +0000 UTC m=+104.020079419" watchObservedRunningTime="2026-01-20 19:51:16.071060136 +0000 UTC m=+104.021785105"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.085768 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.087110 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.587093126 +0000 UTC m=+104.537818095 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.116812 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-lxvjj"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.117660 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-lxvjj"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.174046 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89"]
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.187305 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.188133 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.688116716 +0000 UTC m=+104.638841685 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.292321 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.300554 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.800535327 +0000 UTC m=+104.751260306 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.303100 4948 patch_prober.go:28] interesting pod/console-f9d7485db-lxvjj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.18:8443/health\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body=
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.303165 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lxvjj" podUID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.18:8443/health\": dial tcp 10.217.0.18:8443: connect: connection refused"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.323559 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-2gfvd"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.358957 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.359512 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.393272 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.393632 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.893613759 +0000 UTC m=+104.844338728 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.402783 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.434101 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-h4c6s"]
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.495515 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.497612 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:16.99760092 +0000 UTC m=+104.948325889 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.499981 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l"
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.518899 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 19:51:16 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld
Jan 20 19:51:16 crc kubenswrapper[4948]: [+]process-running ok
Jan 20 19:51:16 crc kubenswrapper[4948]: healthz check failed
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.518949 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 19:51:16 crc kubenswrapper[4948]: W0120 19:51:16.539125 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbfcfce6_0ab8_40ba_80b2_d391a7dd5418.slice/crio-42e037f7f86da1f86af010a4a6d3b3bef24737ac0b7d8c798636a5935e22bf47 WatchSource:0}: Error finding container 42e037f7f86da1f86af010a4a6d3b3bef24737ac0b7d8c798636a5935e22bf47: Status 404 returned error can't find the container with id 42e037f7f86da1f86af010a4a6d3b3bef24737ac0b7d8c798636a5935e22bf47
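
The "Observed pod startup duration" records report podStartSLOduration, which excludes time spent pulling images; in these records firstStartedPulling/lastFinishedPulling are the zero time, so it is simply observedRunningTime minus podCreationTimestamp. A short sketch reproducing the router-default-5444994796-mqlgr figure from the records above:

package main

import (
	"fmt"
	"log"
	"time"
)

// mustParse parses a timestamp in the format kubelet logs them
// ("2026-01-20 19:49:52 +0000 UTC"), panicking on malformed input.
func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		log.Fatal(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-20 19:49:52 +0000 UTC")             // podCreationTimestamp
	running := mustParse("2026-01-20 19:51:15.366285775 +0000 UTC")  // observedRunningTime
	// With no image pulling observed, SLO duration == end-to-end duration.
	fmt.Println("podStartSLOduration =", running.Sub(created)) // 1m23.366285775s
}
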
can't find the container with id 42e037f7f86da1f86af010a4a6d3b3bef24737ac0b7d8c798636a5935e22bf47 Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.599107 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.599452 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:17.099424172 +0000 UTC m=+105.050149141 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.599737 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.602150 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:17.102142596 +0000 UTC m=+105.052867565 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.680005 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp"] Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.686510 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx"] Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.700208 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.700525 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:17.200508433 +0000 UTC m=+105.151233402 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:16 crc kubenswrapper[4948]: W0120 19:51:16.744813 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1267ed5_1f11_4e42_b538_c6d355855019.slice/crio-119557a38adb8929d38e1bcaf55e37bb509c52b3e2daf2a23ff8b4bf5cb212a1 WatchSource:0}: Error finding container 119557a38adb8929d38e1bcaf55e37bb509c52b3e2daf2a23ff8b4bf5cb212a1: Status 404 returned error can't find the container with id 119557a38adb8929d38e1bcaf55e37bb509c52b3e2daf2a23ff8b4bf5cb212a1 Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.795117 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-hxwlm"] Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.804510 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.805004 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-20 19:51:17.304984817 +0000 UTC m=+105.255709786 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:16 crc kubenswrapper[4948]: I0120 19:51:16.909615 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:16 crc kubenswrapper[4948]: E0120 19:51:16.910012 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:17.409994816 +0000 UTC m=+105.360719785 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.006772 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7"]
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.030659 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:17 crc kubenswrapper[4948]: E0120 19:51:17.031103 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:17.531088405 +0000 UTC m=+105.481813374 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
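
The mount/unmount failures above all share one cause: the kubelet cannot build a CSI client because kubevirt.io.hostpath-provisioner has not yet registered with its in-memory driver registry (the csi-hostpathplugin pod is still coming up). A minimal Go sketch of that lookup gate, with invented names rather than the kubelet's actual types:

    package main

    import (
        "fmt"
        "sync"
    )

    // csiDriverRegistry maps a driver name to its plugin socket endpoint.
    // Illustrative only; the real kubelet keeps richer per-driver state.
    type csiDriverRegistry struct {
        mu      sync.RWMutex
        drivers map[string]string
    }

    func (r *csiDriverRegistry) clientEndpoint(driver string) (string, error) {
        r.mu.RLock()
        defer r.mu.RUnlock()
        ep, ok := r.drivers[driver]
        if !ok {
            // Matches the failure mode in the log entries above.
            return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driver)
        }
        return ep, nil
    }

    func main() {
        reg := &csiDriverRegistry{drivers: map[string]string{}}
        // Before the plugin pod registers, every lookup fails fast:
        if _, err := reg.clientEndpoint("kubevirt.io.hostpath-provisioner"); err != nil {
            fmt.Println(err)
        }
        // Once registration happens, the queued mount/unmount retries succeed.
        reg.mu.Lock()
        reg.drivers["kubevirt.io.hostpath-provisioner"] = "/var/lib/kubelet/plugins/csi-hostpath/csi.sock"
        reg.mu.Unlock()
    }
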
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.207169 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:17 crc kubenswrapper[4948]: E0120 19:51:17.207563 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:17.707547753 +0000 UTC m=+105.658272722 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.273676 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx"
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.296940 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" event={"ID":"ac63d066-004a-468f-a63d-48eae71c9111","Type":"ContainerStarted","Data":"06f8a87c74354cc46f5274b2b3479bf204c4853fb0f2ad83b3a8b49a018ecd4f"}
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.305684 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" event={"ID":"aa3527bc-8d08-4c9a-9349-85d27473d624","Type":"ContainerStarted","Data":"8b81d2cdaa603a83e554a610e6ff417eb6f1fd4287d532ce7f5a32efab8955b6"}
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.307292 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg"
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.387214 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:17 crc kubenswrapper[4948]: E0120 19:51:17.387682 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed.
No retries permitted until 2026-01-20 19:51:17.887665081 +0000 UTC m=+105.838390050 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.396933 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" event={"ID":"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f","Type":"ContainerStarted","Data":"f6b2771ec78c63efa6c5ace445263680b8c8c2b0e3ef44e9de31bcf56430c94a"} Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.511973 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:17 crc kubenswrapper[4948]: E0120 19:51:17.513482 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.013439199 +0000 UTC m=+105.964164168 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.557301 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" event={"ID":"ea9e37e3-8bd7-4468-991b-2855d3d3385f","Type":"ContainerStarted","Data":"f5205a66198aae62830b72ff5974748b3e1a4c76299ba34e2bafc460c032bf28"} Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.557354 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" event={"ID":"ea9e37e3-8bd7-4468-991b-2855d3d3385f","Type":"ContainerStarted","Data":"ea0c8910bedef9b9cab21fc3ac60e9c46dff6a03e59754d6aeed2d27951f12aa"} Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.558018 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.597936 4948 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-8g7vp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body= Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.597983 4948 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" podUID="ea9e37e3-8bd7-4468-991b-2855d3d3385f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.600608 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.601826 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.603794 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.603861 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.612978 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" podStartSLOduration=86.612950358 podStartE2EDuration="1m26.612950358s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:17.598025819 +0000 UTC m=+105.548750788" watchObservedRunningTime="2026-01-20 19:51:17.612950358 +0000 UTC m=+105.563675327" Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.618725 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:17 crc kubenswrapper[4948]: E0120 19:51:17.619528 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.119511338 +0000 UTC m=+106.070236307 (durationBeforeRetry 500ms). 
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.720256 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" event={"ID":"d1267ed5-1f11-4e42-b538-c6d355855019","Type":"ContainerStarted","Data":"119557a38adb8929d38e1bcaf55e37bb509c52b3e2daf2a23ff8b4bf5cb212a1"}
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.732648 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 19:51:17 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld
Jan 20 19:51:17 crc kubenswrapper[4948]: [+]process-running ok
Jan 20 19:51:17 crc kubenswrapper[4948]: healthz check failed
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.732718 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.732882 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:17 crc kubenswrapper[4948]: E0120 19:51:17.734113 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.234096909 +0000 UTC m=+106.184821878 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.851070 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" event={"ID":"487f8971-88dc-4ebe-9d67-3b48284c72f9","Type":"ContainerStarted","Data":"fef52f838ce1e485cc8079aac2202a0e004d346a11fca2a4ce05ba8b09558fe2"} Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.851319 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:17 crc kubenswrapper[4948]: E0120 19:51:17.851820 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.351791646 +0000 UTC m=+106.302516615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.906471 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" event={"ID":"203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3","Type":"ContainerStarted","Data":"3242e57d7faeaaa95ee3283d73f18d787481ef4781a615bcb4bd0d4e89f2d0d1"} Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.906514 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" event={"ID":"203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3","Type":"ContainerStarted","Data":"511dd2deb643907b8a402e9765d15a9544f4d0267b776873d19c36377cb5af5b"} Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.952450 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:17 crc kubenswrapper[4948]: E0120 19:51:17.953786 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-20 19:51:18.453768791 +0000 UTC m=+106.404493770 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.964306 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-8sf9d" event={"ID":"a15f8225-8436-459c-909a-dcc98d5d35fb","Type":"ContainerStarted","Data":"487b8d67fad1bc99d902952f040eb66c64677cbc706aacb861b6bccdb1f4e2b5"} Jan 20 19:51:17 crc kubenswrapper[4948]: I0120 19:51:17.995439 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" event={"ID":"f03e94eb-7658-49ed-a576-5ac4cecfe82c","Type":"ContainerStarted","Data":"7aca0ad6677ce0bb0d6d8bb775b6f90409a26ccead564b9302746e4d02167059"} Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.015886 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" event={"ID":"35ab84e9-16ce-4c92-b69b-d53854b18979","Type":"ContainerStarted","Data":"eadcb222a852130c8680763a997c6311d3cfb920999e2dcf41853998d4b2b8aa"} Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.016423 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.024039 4948 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wzh2f container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.024117 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" podUID="35ab84e9-16ce-4c92-b69b-d53854b18979" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.039442 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" podStartSLOduration=86.039405199 podStartE2EDuration="1m26.039405199s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:17.906148026 +0000 UTC m=+105.856872985" watchObservedRunningTime="2026-01-20 19:51:18.039405199 +0000 UTC m=+105.990130168" Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.042917 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ts8z9" podStartSLOduration=87.042908465 podStartE2EDuration="1m27.042908465s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:18.020648745 +0000 UTC m=+105.971373714" watchObservedRunningTime="2026-01-20 19:51:18.042908465 +0000 UTC m=+105.993633434"
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.047828 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-md5gg"]
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.047883 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5svhh" event={"ID":"31b15d20-e87f-4c55-8109-ead0574ff43d","Type":"ContainerStarted","Data":"6fb83d30f32f42a167d87a5ca6650f37e23f0f4c0c07210bf462cfbd29c75f66"}
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.055758 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.056737 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.556687313 +0000 UTC m=+106.507412382 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.070949 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" event={"ID":"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418","Type":"ContainerStarted","Data":"42e037f7f86da1f86af010a4a6d3b3bef24737ac0b7d8c798636a5935e22bf47"}
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.166872 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.167973 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.667949333 +0000 UTC m=+106.618674372 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
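
The pod_startup_latency_tracker lines report podStartE2EDuration as the span from podCreationTimestamp to the observed running time, which is why pods created at 19:49:51-19:49:52 show roughly 86-87 seconds here. A worked check of that arithmetic using the openshift-config-operator entry above (illustrative only; which observation timestamp feeds the figure is inferred from the numbers, where watchObservedRunningTime reproduces it exactly):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Layout matching the timestamps as they appear in the log.
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        created, err := time.Parse(layout, "2026-01-20 19:49:51 +0000 UTC")
        if err != nil {
            panic(err)
        }
        running, err := time.Parse(layout, "2026-01-20 19:51:17.612950358 +0000 UTC")
        if err != nil {
            panic(err)
        }
        // Prints 1m26.612950358s, matching podStartE2EDuration="1m26.612950358s".
        fmt.Println(running.Sub(created))
    }
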
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.169545 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" event={"ID":"666e60ed-f213-4af4-a4a9-969864d1fd0e","Type":"ContainerStarted","Data":"4dfb5b88545a887c7a6a2654fee74d2dd567f490ce4bf04e51a2ff3c8a9b4cca"}
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.181166 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-8sf9d" podStartSLOduration=13.181143085 podStartE2EDuration="13.181143085s" podCreationTimestamp="2026-01-20 19:51:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:18.167389188 +0000 UTC m=+106.118114177" watchObservedRunningTime="2026-01-20 19:51:18.181143085 +0000 UTC m=+106.131868054"
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.183944 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"]
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.185614 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" event={"ID":"13e58171-7fc1-4feb-bcb5-2737e74615a6","Type":"ContainerStarted","Data":"7db498ccb6657c5602efd904ba3edc9b51cb672658c774d41cf25a6c5c7bf37b"}
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.188184 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" event={"ID":"e860d704-e6b4-4490-8dda-52696e52d75d","Type":"ContainerStarted","Data":"2be152bea3b6d0d5ccbd512ae6a275edf0aec57c0df7bbfb27cafe0ff2c572a4"}
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.268418 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.268770 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.768759007 +0000 UTC m=+106.719483976 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.303551 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" event={"ID":"15db69a5-93e7-4777-b31a-800760048d6e","Type":"ContainerStarted","Data":"1e213d989d211b6b666a264863f1b3eb27cc196667d33df87b70bcef8488dfd8"} Jan 20 19:51:18 crc kubenswrapper[4948]: W0120 19:51:18.307492 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4764a2_50ea_421c_9d14_13189740a541.slice/crio-0860553a13454c8059aed120e32aca0a9e2e366c76353f2a1641f2c3ae79c13b WatchSource:0}: Error finding container 0860553a13454c8059aed120e32aca0a9e2e366c76353f2a1641f2c3ae79c13b: Status 404 returned error can't find the container with id 0860553a13454c8059aed120e32aca0a9e2e366c76353f2a1641f2c3ae79c13b Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.317389 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" event={"ID":"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f","Type":"ContainerStarted","Data":"ac4cec6f2ec0e377cf97ee3ca1c44a94c7107c9345e60a1147a948c6ba0903f0"} Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.369246 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.369546 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.869526799 +0000 UTC m=+106.820251768 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.369640 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.370148 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.870127386 +0000 UTC m=+106.820852415 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.386200 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" event={"ID":"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f","Type":"ContainerStarted","Data":"bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6"} Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.386242 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.386252 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" event={"ID":"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f","Type":"ContainerStarted","Data":"2d1e4e93ea5cbe0174b2009e834aa6e18c274933e64ef3f3f69484b8f786ffd3"} Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.422107 4948 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bbslp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.422163 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.427493 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-zs4jw" Jan 20 19:51:18 crc 
kubenswrapper[4948]: I0120 19:51:18.477792 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.478610 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:18.97859431 +0000 UTC m=+106.929319279 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.479684 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" podStartSLOduration=86.479659919 podStartE2EDuration="1m26.479659919s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:18.358548269 +0000 UTC m=+106.309273238" watchObservedRunningTime="2026-01-20 19:51:18.479659919 +0000 UTC m=+106.430384888"
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.514516 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 19:51:18 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld
Jan 20 19:51:18 crc kubenswrapper[4948]: [+]process-running ok
Jan 20 19:51:18 crc kubenswrapper[4948]: healthz check failed
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.514579 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.579846 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.583287 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:19.083273279 +0000 UTC m=+107.033998248 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
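
The prober entries above show the two common failure shapes: a TCP "connect: connection refused" while the container's server is not yet listening, and an HTTP 500 from a healthz endpoint that is up but failing sub-checks. A minimal probe sketch with the usual success rule (statuses in [200,400) pass; everything else, including transport errors, fails), as a hypothetical helper rather than the kubelet's prober:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probeHTTP returns whether the probe passed, plus a failure detail in
    // the spirit of the log output above: a transport error surfaces as e.g.
    // "dial tcp 10.217.0.39:8080: connect: connection refused", while a live
    // endpoint returning 500 fails with its status code.
    func probeHTTP(url string, timeout time.Duration) (bool, string) {
        client := &http.Client{Timeout: timeout}
        resp, err := client.Get(url)
        if err != nil {
            return false, err.Error()
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 200 && resp.StatusCode < 400 {
            return true, ""
        }
        return false, fmt.Sprintf("HTTP probe failed with statuscode: %d", resp.StatusCode)
    }

    func main() {
        ok, detail := probeHTTP("http://10.217.0.39:8080/healthz", time.Second)
        fmt.Println(ok, detail)
    }
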
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.596216 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bxbqp" podStartSLOduration=87.596194463 podStartE2EDuration="1m27.596194463s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:18.490785214 +0000 UTC m=+106.441510183" watchObservedRunningTime="2026-01-20 19:51:18.596194463 +0000 UTC m=+106.546919432"
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.600204 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4pnmq" podStartSLOduration=86.600186643 podStartE2EDuration="1m26.600186643s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:18.593304354 +0000 UTC m=+106.544029323" watchObservedRunningTime="2026-01-20 19:51:18.600186643 +0000 UTC m=+106.550911612"
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.611236 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr"]
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.611272 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-pkc9x"]
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.613320 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9"]
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.647847 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-94v8r"]
Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.680511 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.680894 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:19.180877285 +0000 UTC m=+107.131602254 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.848016 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.848731 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:19.348694164 +0000 UTC m=+107.299419133 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.859185 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" podStartSLOduration=86.859155441 podStartE2EDuration="1m26.859155441s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:18.667834957 +0000 UTC m=+106.618559926" watchObservedRunningTime="2026-01-20 19:51:18.859155441 +0000 UTC m=+106.809880410" Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.869410 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" podStartSLOduration=86.869377682 podStartE2EDuration="1m26.869377682s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:18.864402815 +0000 UTC m=+106.815127784" watchObservedRunningTime="2026-01-20 19:51:18.869377682 +0000 UTC m=+106.820102661" Jan 20 19:51:18 crc kubenswrapper[4948]: I0120 19:51:18.950201 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:18 crc kubenswrapper[4948]: E0120 19:51:18.950935 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:19.450917857 +0000 UTC m=+107.401642826 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.060797 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:19 crc kubenswrapper[4948]: E0120 19:51:19.061337 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:19.561313854 +0000 UTC m=+107.512038823 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.170736 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:19 crc kubenswrapper[4948]: E0120 19:51:19.171157 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:19.671138565 +0000 UTC m=+107.621863534 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.272343 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:19 crc kubenswrapper[4948]: E0120 19:51:19.272664 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:19.772653948 +0000 UTC m=+107.723378917 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.364202 4948 patch_prober.go:28] interesting pod/apiserver-76f77b778f-k2czh container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]log ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]etcd ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/max-in-flight-filter ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Jan 20 19:51:19 crc kubenswrapper[4948]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/project.openshift.io-projectcache ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [-]poststarthook/openshift.io-startinformers failed: reason withheld
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-restmapperupdater ok
Jan 20 19:51:19 crc kubenswrapper[4948]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 20 19:51:19 crc kubenswrapper[4948]: livez check failed
Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.364253 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" podUID="337527e2-a869-4df8-988d-66bf559e348d" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
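
The startup-probe body quoted above is the aggregated health-check format: one "[+]name ok" or "[-]name failed: reason withheld" line per registered check, then an overall verdict, with individual reasons withheld unless verbose output is requested. A sketch that renders the same shape from a set of named checks (assumed structure for illustration, not the apiserver's implementation):

    package main

    import (
        "fmt"
        "strings"
    )

    type namedCheck struct {
        name string
        run  func() error
    }

    // renderHealth builds the [+]/[-] report and the overall verdict line,
    // withholding individual failure reasons as in the log output above.
    func renderHealth(kind string, checks []namedCheck) (string, bool) {
        healthy := true
        var b strings.Builder
        for _, c := range checks {
            if err := c.run(); err != nil {
                healthy = false
                fmt.Fprintf(&b, "[-]%s failed: reason withheld\n", c.name)
            } else {
                fmt.Fprintf(&b, "[+]%s ok\n", c.name)
            }
        }
        if healthy {
            fmt.Fprintf(&b, "%s check passed\n", kind)
        } else {
            fmt.Fprintf(&b, "%s check failed\n", kind)
        }
        return b.String(), healthy
    }

    func main() {
        body, ok := renderHealth("livez", []namedCheck{
            {"ping", func() error { return nil }},
            {"poststarthook/openshift.io-startinformers", func() error { return fmt.Errorf("not ready") }},
        })
        fmt.Print(body)
        fmt.Println("healthy:", ok)
    }
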
pod="openshift-apiserver/apiserver-76f77b778f-k2czh" podUID="337527e2-a869-4df8-988d-66bf559e348d" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.383476 4948 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-8g7vp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body= Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.383518 4948 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-8g7vp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body= Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.383625 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" podUID="ea9e37e3-8bd7-4468-991b-2855d3d3385f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.383530 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" podUID="ea9e37e3-8bd7-4468-991b-2855d3d3385f" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.384407 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:19 crc kubenswrapper[4948]: E0120 19:51:19.384847 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:19.884833013 +0000 UTC m=+107.835557982 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.559195 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-jcvk4" podStartSLOduration=87.559170953 podStartE2EDuration="1m27.559170953s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:19.271731573 +0000 UTC m=+107.222456542" watchObservedRunningTime="2026-01-20 19:51:19.559170953 +0000 UTC m=+107.509895922" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.562378 4948 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-6cqcg container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.562405 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" podUID="aa3527bc-8d08-4c9a-9349-85d27473d624" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.563402 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:19 crc kubenswrapper[4948]: E0120 19:51:19.563856 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.063843341 +0000 UTC m=+108.014568310 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.567166 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-mqlgr" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.569230 4948 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wzh2f container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.569278 4948 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-6cqcg container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.569302 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" podUID="35ab84e9-16ce-4c92-b69b-d53854b18979" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.569329 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" podUID="aa3527bc-8d08-4c9a-9349-85d27473d624" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.569583 4948 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wzh2f container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.569602 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" podUID="35ab84e9-16ce-4c92-b69b-d53854b18979" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.588397 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:19 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:19 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:19 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.588452 4948 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.620969 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" event={"ID":"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418","Type":"ContainerStarted","Data":"73ddb2ecadf737996a7f1ae930d466cabc7b80c9c8996be21fd69712e4cca29e"} Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.644525 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" event={"ID":"666e60ed-f213-4af4-a4a9-969864d1fd0e","Type":"ContainerStarted","Data":"d88c2cd8477cab7acfe8cb8c6eea83a0f72e35ec5c711eb73db9ab32adca3b5a"} Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.644565 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" event={"ID":"666e60ed-f213-4af4-a4a9-969864d1fd0e","Type":"ContainerStarted","Data":"f5bdab6572affcd3c97451c42cbe1de3933165d6583b12751e2e6888cb497044"} Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.660470 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" event={"ID":"bc3d2e55-288e-4c8c-8a78-cacf02725918","Type":"ContainerStarted","Data":"ab88e3f81f10de32f1d3b295892ac093827ed337f15bbe1b2746b2c6e6a690f4"} Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.668684 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:19 crc kubenswrapper[4948]: E0120 19:51:19.669979 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.16996407 +0000 UTC m=+108.120689039 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.688143 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-5svhh" event={"ID":"31b15d20-e87f-4c55-8109-ead0574ff43d","Type":"ContainerStarted","Data":"736bbbcaa8467dfc784790a888706fbf598bf706048da7bfce5502a0fd120728"} Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.688506 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.712961 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" event={"ID":"925c0fbe-bc51-41ee-b496-1a83b01918dd","Type":"ContainerStarted","Data":"c0b555f7c3520e7fe3fbd0bfdf98e33bf99754cbff6570783660bd03690af9c6"} Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.733076 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" event={"ID":"d1267ed5-1f11-4e42-b538-c6d355855019","Type":"ContainerStarted","Data":"a1dc2a17ac32d42d71260385b36c78fc8d3875797818aea291316788915a6214"} Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.772932 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:19 crc kubenswrapper[4948]: E0120 19:51:19.778646 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.278630859 +0000 UTC m=+108.229355828 (durationBeforeRetry 500ms). 
Jan 20 19:51:19 crc kubenswrapper[4948]: I0120 19:51:19.822398 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-hxwlm" podStartSLOduration=87.822381169 podStartE2EDuration="1m27.822381169s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:19.821422983 +0000 UTC m=+107.772147952" watchObservedRunningTime="2026-01-20 19:51:19.822381169 +0000 UTC m=+107.773106138"
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:19.996622 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:20 crc kubenswrapper[4948]: E0120 19:51:19.997055 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.497016676 +0000 UTC m=+108.447741645 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:19.997125 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:20 crc kubenswrapper[4948]: E0120 19:51:19.998502 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.498478516 +0000 UTC m=+108.449203705 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.097990 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:20 crc kubenswrapper[4948]: E0120 19:51:20.098493 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.598471278 +0000 UTC m=+108.549196247 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.098724 4948 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bbslp container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body=
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.098774 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused"
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.101369 4948 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bbslp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body=
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.101398 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused"
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.186960 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-85cmp" podStartSLOduration=88.186937093 podStartE2EDuration="1m28.186937093s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:20.180834595 +0000 UTC m=+108.131559564" watchObservedRunningTime="2026-01-20 19:51:20.186937093 +0000 UTC m=+108.137662062"
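The E0120 nestedpendingoperations lines above show the kubelet's retry gating: after a failure the operation is stamped "No retries permitted until <deadline>" (here always now+500ms, the initial durationBeforeRetry), and the reconciler's next pass simply bounces off that deadline until it expires. A stripped-down sketch of the gating idea, hedged: illustrative only, not the kubelet implementation, with an assumed exponential-growth step on repeated failure:

package main

import (
	"errors"
	"fmt"
	"time"
)

// gate remembers, per operation key, when the next attempt is allowed and the
// current backoff, mimicking the "No retries permitted until ..." behavior.
type gate struct {
	notBefore map[string]time.Time
	backoff   map[string]time.Duration
}

func newGate() *gate {
	return &gate{notBefore: map[string]time.Time{}, backoff: map[string]time.Duration{}}
}

func (g *gate) tryRun(key string, run func() error) error {
	if t, ok := g.notBefore[key]; ok && time.Now().Before(t) {
		return fmt.Errorf("no retries permitted until %s (durationBeforeRetry %s)",
			t.Format(time.RFC3339Nano), g.backoff[key])
	}
	if err := run(); err != nil {
		b := g.backoff[key]
		if b == 0 {
			b = 500 * time.Millisecond // initial durationBeforeRetry seen in the log
		} else if b < 2*time.Minute {
			b *= 2 // grow on repeated failure (assumed, for illustration)
		}
		g.backoff[key] = b
		g.notBefore[key] = time.Now().Add(b)
		return err
	}
	// Success clears the pending state so future attempts run immediately.
	delete(g.notBefore, key)
	delete(g.backoff, key)
	return nil
}

func main() {
	g := newGate()
	key := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
	mount := func() error { return errors.New("driver not registered") }
	fmt.Println(g.tryRun(key, mount)) // fails and arms a 500ms gate
	fmt.Println(g.tryRun(key, mount)) // rejected: deadline not reached yet
	time.Sleep(600 * time.Millisecond)
	fmt.Println(g.tryRun(key, mount)) // allowed to retry after the backoff
}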
UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:20.180834595 +0000 UTC m=+108.131559564" watchObservedRunningTime="2026-01-20 19:51:20.186937093 +0000 UTC m=+108.137662062" Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.188452 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-5dsv5" event={"ID":"e860d704-e6b4-4490-8dda-52696e52d75d","Type":"ContainerStarted","Data":"c959dd68b898cc6f59a65dcd9a559591c16096a902b3ad23f5ee08510dd5c750"} Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.196532 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" event={"ID":"c05cd5ea-b0a0-4314-9676-199d2f7edd7c","Type":"ContainerStarted","Data":"c001ded8f0947ecf52ef8868fc442f6bb848f19ef3907752400e61900d288889"} Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.199383 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:20 crc kubenswrapper[4948]: E0120 19:51:20.199810 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.699796315 +0000 UTC m=+108.650521284 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.201392 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" event={"ID":"cf1d582b-c803-4add-9b38-67358e29dd96","Type":"ContainerStarted","Data":"5b9ef80943bcdfeec24ada67ff4f74d8252e3f81c5ec76443f6f6c03fbe368ff"} Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.202563 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" event={"ID":"35ab84e9-16ce-4c92-b69b-d53854b18979","Type":"ContainerStarted","Data":"16e9e8871b0922336c3eb35f5712ad5fed9a37994876d849fd7e3364de9b94fe"} Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.203578 4948 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wzh2f container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.203606 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" podUID="35ab84e9-16ce-4c92-b69b-d53854b18979" containerName="packageserver" 
probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.272851 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.296300 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" event={"ID":"4848a3aa-4912-44e4-a9b3-8b2283a2bd6f","Type":"ContainerStarted","Data":"8ab98bf884acd55939be0cb927f4d0d48926356810c7b33ba289e943e0de1804"} Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.300285 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:20 crc kubenswrapper[4948]: E0120 19:51:20.301288 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.801270857 +0000 UTC m=+108.751995826 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.356009 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" event={"ID":"ac63d066-004a-468f-a63d-48eae71c9111","Type":"ContainerStarted","Data":"96a26c1842fe7da2fc5739506ae7cd36faffd7c2be41277371b291feb67c0c54"} Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.356094 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" event={"ID":"ac63d066-004a-468f-a63d-48eae71c9111","Type":"ContainerStarted","Data":"bdda6612efbacb695cf8fd1893c19b0b34f8389b67e19214ad8fa2d75da6c3c9"} Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.356210 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.387082 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" event={"ID":"0d4764a2-50ea-421c-9d14-13189740a541","Type":"ContainerStarted","Data":"fee25ea7a9b28716b72c16edbca7af14b564a44ee895168fea54cb0273c2a921"} Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.387131 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" event={"ID":"0d4764a2-50ea-421c-9d14-13189740a541","Type":"ContainerStarted","Data":"0860553a13454c8059aed120e32aca0a9e2e366c76353f2a1641f2c3ae79c13b"} Jan 20 19:51:20 crc 
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.404238 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" event={"ID":"d9894924-d73d-4e5f-9a04-bf4c6bed159a","Type":"ContainerStarted","Data":"ab09170533fa047d674c4795884856e92cd6b8fadc603baa8cf60bfe44e710d7"}
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.406897 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.411169 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" event={"ID":"34a4c701-23f8-4d4e-97c0-7ceeaa229d0f","Type":"ContainerStarted","Data":"078a1e9ebb4712c035fc0da200220c3dad49b936b9833cc59c10f6047902f567"}
Jan 20 19:51:20 crc kubenswrapper[4948]: E0120 19:51:20.411376 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:20.911355326 +0000 UTC m=+108.862080385 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.416925 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-5svhh" podStartSLOduration=15.416891057 podStartE2EDuration="15.416891057s" podCreationTimestamp="2026-01-20 19:51:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:20.310148801 +0000 UTC m=+108.260873770" watchObservedRunningTime="2026-01-20 19:51:20.416891057 +0000 UTC m=+108.367616026"
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.441135 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" event={"ID":"fbe60f4d-9d85-4eb6-8b54-eba15df5d683","Type":"ContainerStarted","Data":"9ec2cd588ea1c6d9ab1e0b840678fbb65e90f60b2368e40a49a1eccf538396aa"}
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.441226 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" event={"ID":"fbe60f4d-9d85-4eb6-8b54-eba15df5d683","Type":"ContainerStarted","Data":"a115740b9fc8f9e7fd2f70c53955559bc8461150c1dd62690ed276010626107b"}
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.443391 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7"
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.444931 4948 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-sxpf7 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body=
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.445001 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" podUID="fbe60f4d-9d85-4eb6-8b54-eba15df5d683" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused"
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.459886 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" event={"ID":"15db69a5-93e7-4777-b31a-800760048d6e","Type":"ContainerStarted","Data":"474ccb55b6af1297131df681dc43239880f721b3a63e12e63932a914bf111094"}
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.462995 4948 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bbslp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body=
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.463062 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused"
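The Liveness/Readiness failures above are plain HTTP GETs against each container's /healthz that die with "connect: connection refused" until the server binds its port; a response status outside the 2xx/3xx range also counts as failure. Conceptually a single probe attempt looks like the sketch below (hedged: an illustration of the mechanism, not the kubelet's prober code; kubelet's HTTPS probes also skip certificate verification, mirrored here):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probeHTTP performs one probe attempt: a GET with a short timeout, no TLS
// verification (probe targets use self-signed certs), success on 2xx/3xx.
func probeHTTP(url string) (string, error) {
	client := &http.Client{
		Timeout: 1 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		// e.g. "dial tcp 10.217.0.21:8443: connect: connection refused"
		return "failure", err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return "success", nil
	}
	return "failure", fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
}

func main() {
	// One of the probe targets from the log (assumes pod-network reachability).
	result, err := probeHTTP("https://10.217.0.21:8443/healthz")
	fmt.Println(result, err)
}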
podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.465045 4948 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-6cqcg container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.465082 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" podUID="aa3527bc-8d08-4c9a-9349-85d27473d624" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.485878 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-8g7vp" Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.507866 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:20 crc kubenswrapper[4948]: E0120 19:51:20.508379 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.008351525 +0000 UTC m=+108.959076494 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.514995 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:20 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:20 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:20 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.515076 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.676308 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:20 crc kubenswrapper[4948]: E0120 19:51:20.676619 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.176607738 +0000 UTC m=+109.127332707 (durationBeforeRetry 500ms). 
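The router's startup probe body above ([-]backend-http, [-]has-synced, [+]process-running, then "healthz check failed") is the conventional aggregated healthz format: one line per named check, and any failing check turns the endpoint into an HTTP 500, which is exactly the "statuscode: 500" the prober then reports. A minimal handler in that style (a sketch of the convention under assumed check names, not the router's actual code):

package main

import (
	"fmt"
	"net/http"
)

// check is a named health check; the handler renders one [+]/[-] line per
// check, matching the "[-]backend-http failed: reason withheld" body above.
type check struct {
	name string
	fn   func() error
}

func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		failed := false
		body := ""
		for _, c := range checks {
			if err := c.fn(); err != nil {
				failed = true
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
			} else {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			}
		}
		if failed {
			w.WriteHeader(http.StatusInternalServerError) // the probe sees statuscode: 500
			body += "healthz check failed\n"
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	// Check names borrowed from the log; the pass/fail wiring is illustrative.
	http.Handle("/healthz", healthz([]check{
		{"backend-http", func() error { return fmt.Errorf("not ready") }},
		{"has-synced", func() error { return fmt.Errorf("not ready") }},
		{"process-running", func() error { return nil }},
	}))
	http.ListenAndServe(":8080", nil)
}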
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.707176 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-4vg89" podStartSLOduration=88.707158405 podStartE2EDuration="1m28.707158405s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:20.70476448 +0000 UTC m=+108.655489449" watchObservedRunningTime="2026-01-20 19:51:20.707158405 +0000 UTC m=+108.657883374"
Jan 20 19:51:20 crc kubenswrapper[4948]: I0120 19:51:20.894573 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:20.961807 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.461773365 +0000 UTC m=+109.412498334 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.026490 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.027191 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.527169148 +0000 UTC m=+109.477894117 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.115228 4948 patch_prober.go:28] interesting pod/apiserver-76f77b778f-k2czh container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]log ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]etcd ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/max-in-flight-filter ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/project.openshift.io-projectcache ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-startinformers ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/openshift.io-restmapperupdater ok
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 20 19:51:21 crc kubenswrapper[4948]: livez check failed
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.115377 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" podUID="337527e2-a869-4df8-988d-66bf559e348d" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.131197 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.131665 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.631644493 +0000 UTC m=+109.582369462 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.201247 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" podStartSLOduration=90.20122961 podStartE2EDuration="1m30.20122961s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:21.013036211 +0000 UTC m=+108.963761180" watchObservedRunningTime="2026-01-20 19:51:21.20122961 +0000 UTC m=+109.151954579"
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.232599 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.232917 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.732905989 +0000 UTC m=+109.683630958 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.334030 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.334320 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.834280078 +0000 UTC m=+109.785005047 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.334467 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.339685 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.839654215 +0000 UTC m=+109.790379194 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.437848 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.438957 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:21.938934987 +0000 UTC m=+109.889659956 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.439882 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-k4fgt" podStartSLOduration=89.439858212 podStartE2EDuration="1m29.439858212s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:21.438945387 +0000 UTC m=+109.389670356" watchObservedRunningTime="2026-01-20 19:51:21.439858212 +0000 UTC m=+109.390583181"
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.440800 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" podStartSLOduration=89.440791468 podStartE2EDuration="1m29.440791468s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:21.294582069 +0000 UTC m=+109.245307038" watchObservedRunningTime="2026-01-20 19:51:21.440791468 +0000 UTC m=+109.391516437"
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.497119 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" event={"ID":"925c0fbe-bc51-41ee-b496-1a83b01918dd","Type":"ContainerStarted","Data":"2c5e6bb3e4be047e32bbab2463803d0b49d72433e138a9e91a3180474d4a16c1"}
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.497191 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" event={"ID":"925c0fbe-bc51-41ee-b496-1a83b01918dd","Type":"ContainerStarted","Data":"dbd5ee5079b86507a4ecdac35c61755e692be0940595062e09ee5551c4347471"}
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.512261 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" event={"ID":"cf1d582b-c803-4add-9b38-67358e29dd96","Type":"ContainerStarted","Data":"f7b93703cfdd25d9927c191a32acb9a4df7ff26261d61b07ad346f12dc7a97eb"}
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.512332 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" event={"ID":"cf1d582b-c803-4add-9b38-67358e29dd96","Type":"ContainerStarted","Data":"be60d46af7d8dffabfad104f7df0d2dd1e06692e87fd3c0869d11d2dea3bd756"}
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.515010 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 19:51:21 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld
Jan 20 19:51:21 crc kubenswrapper[4948]: [+]process-running ok
Jan 20 19:51:21 crc kubenswrapper[4948]: healthz check failed
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.515057 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.515181 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-h4c6s" event={"ID":"dbfcfce6-0ab8-40ba-80b2-d391a7dd5418","Type":"ContainerStarted","Data":"3da72db55a8b6cbfa5d666acefc3b8cab69c3465c5de36d6dedd2b33c47b0bbc"}
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.546037 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" event={"ID":"c05cd5ea-b0a0-4314-9676-199d2f7edd7c","Type":"ContainerStarted","Data":"1d9316d3733405016df2bb9fe49e78b4d022e9d7bb18e133d7945a4149bf4162"}
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.573240 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.575642 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:22.075630324 +0000 UTC m=+110.026355283 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.577321 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" event={"ID":"bc3d2e55-288e-4c8c-8a78-cacf02725918","Type":"ContainerStarted","Data":"df843225aa254283c9f002d554f823ef9c4368f4975f4e697074839c69b21b20"}
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.587423 4948 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-sxpf7 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body=
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.587483 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" podUID="fbe60f4d-9d85-4eb6-8b54-eba15df5d683" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused"
Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.589120 4948 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-wzh2f container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body=
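In the pod_startup_latency_tracker entries above, podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and with firstStartedPulling/lastFinishedPulling left at the zero time there is no image-pull window to subtract, so podStartSLOduration equals the E2E value (e.g. 19:51:21.439858212 − 19:49:52 = 1m29.439858212s for multus-admission-controller). The same arithmetic in a few lines of Go (the layout string is a reference-time pattern matching these timestamps):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the multus-admission-controller entry above.
	// Go's time.Parse accepts a fractional-seconds field in the input even
	// when the layout omits it.
	const layout = "2006-01-02 15:04:05 -0700 MST"
	created, err := time.Parse(layout, "2026-01-20 19:49:52 +0000 UTC")
	if err != nil {
		panic(err)
	}
	running, err := time.Parse(layout, "2026-01-20 19:51:21.439858212 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println("podStartE2EDuration:", running.Sub(created)) // 1m29.439858212s
}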
\"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" start-of-body= Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.589157 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" podUID="35ab84e9-16ce-4c92-b69b-d53854b18979" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.43:5443/healthz\": dial tcp 10.217.0.43:5443: connect: connection refused" Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.689606 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-md5gg" podStartSLOduration=89.689585138 podStartE2EDuration="1m29.689585138s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:21.603533869 +0000 UTC m=+109.554258838" watchObservedRunningTime="2026-01-20 19:51:21.689585138 +0000 UTC m=+109.640310107" Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.779987 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l48rg" podStartSLOduration=89.779970536 podStartE2EDuration="1m29.779970536s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:21.697408063 +0000 UTC m=+109.648133032" watchObservedRunningTime="2026-01-20 19:51:21.779970536 +0000 UTC m=+109.730695505" Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.782467 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" podStartSLOduration=89.782447234 podStartE2EDuration="1m29.782447234s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:21.775792102 +0000 UTC m=+109.726517091" watchObservedRunningTime="2026-01-20 19:51:21.782447234 +0000 UTC m=+109.733172203" Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.815616 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.818312 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:22.318285367 +0000 UTC m=+110.269010406 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.918269 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:21 crc kubenswrapper[4948]: E0120 19:51:21.919046 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:22.419030729 +0000 UTC m=+110.369755698 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.969345 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-bcvw9" podStartSLOduration=89.969325228 podStartE2EDuration="1m29.969325228s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:21.946639866 +0000 UTC m=+109.897364835" watchObservedRunningTime="2026-01-20 19:51:21.969325228 +0000 UTC m=+109.920050197" Jan 20 19:51:21 crc kubenswrapper[4948]: I0120 19:51:21.970742 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-nvgzr" podStartSLOduration=89.970734406 podStartE2EDuration="1m29.970734406s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:21.869434879 +0000 UTC m=+109.820159848" watchObservedRunningTime="2026-01-20 19:51:21.970734406 +0000 UTC m=+109.921459375" Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.031239 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:22 crc kubenswrapper[4948]: E0120 19:51:22.031557 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
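The m=+109.554258838-style suffixes on the timestamps above are Go monotonic clock readings: a time.Time printed while it still carries its monotonic reading appends "m=±<seconds>", measured from an arbitrary reference that in practice is close to process start (so here the values double as kubelet uptime). A small demonstration:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now() // time.Now() carries a monotonic clock reading
	time.Sleep(250 * time.Millisecond)
	now := time.Now()
	fmt.Println(now)            // prints "... m=+0.25...", the suffix seen in the log
	fmt.Println(now.Sub(start)) // subtraction uses the monotonic reading: ~250ms
	fmt.Println(now.Round(0))   // Round(0) strips the reading; no m=+ suffix
}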
Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.051018 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-h4c6s" podStartSLOduration=91.051000997 podStartE2EDuration="1m31.051000997s" podCreationTimestamp="2026-01-20 19:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:22.040272613 +0000 UTC m=+109.990997582" watchObservedRunningTime="2026-01-20 19:51:22.051000997 +0000 UTC m=+110.001725966"
Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.108941 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-94v8r" podStartSLOduration=90.108922915 podStartE2EDuration="1m30.108922915s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:22.10693139 +0000 UTC m=+110.057656359" watchObservedRunningTime="2026-01-20 19:51:22.108922915 +0000 UTC m=+110.059647874"
Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.132520 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:22 crc kubenswrapper[4948]: E0120 19:51:22.132962 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:22.632946593 +0000 UTC m=+110.583671562 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.234230 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 20 19:51:22 crc kubenswrapper[4948]: E0120 19:51:22.234402 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:22.734374444 +0000 UTC m=+110.685099413 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.234833 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:22 crc kubenswrapper[4948]: E0120 19:51:22.235316 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:22.735294019 +0000 UTC m=+110.686018988 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.350046 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:22 crc kubenswrapper[4948]: E0120 19:51:22.350486 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:22.850471526 +0000 UTC m=+110.801196495 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.573594 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:22 crc kubenswrapper[4948]: E0120 19:51:22.574038 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.074024184 +0000 UTC m=+111.024749153 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.583307 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:22 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:22 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:22 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.583336 4948 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-sxpf7 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.583364 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.583394 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" podUID="fbe60f4d-9d85-4eb6-8b54-eba15df5d683" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.595962 4948 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-6cqcg container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.596075 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" podUID="aa3527bc-8d08-4c9a-9349-85d27473d624" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.676309 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:22 crc kubenswrapper[4948]: E0120 19:51:22.677689 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.177665576 +0000 UTC m=+111.128390545 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:22 crc kubenswrapper[4948]: I0120 19:51:22.803733 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:22 crc kubenswrapper[4948]: E0120 19:51:22.804160 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.304128823 +0000 UTC m=+111.254853802 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.026679 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.027241 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.527219179 +0000 UTC m=+111.477944148 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.109071 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-m7lf9"] Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.110079 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.120760 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4l26k"] Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.121858 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.128466 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-utilities\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.128660 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-catalog-content\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.128800 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.129115 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.629103472 +0000 UTC m=+111.579828441 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.129233 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4k45\" (UniqueName: \"kubernetes.io/projected/4e87b4cc-edb1-4541-aff1-83012069d55c-kube-api-access-h4k45\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.130412 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.130673 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.143577 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m7lf9"] Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.216533 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4l26k"] Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.230387 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.230522 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.730507542 +0000 UTC m=+111.681232511 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.230634 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4k45\" (UniqueName: \"kubernetes.io/projected/4e87b4cc-edb1-4541-aff1-83012069d55c-kube-api-access-h4k45\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.230681 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-utilities\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.230759 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk4wx\" (UniqueName: \"kubernetes.io/projected/a443e18f-462b-4c81-9f70-3bae303f278f-kube-api-access-mk4wx\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.230791 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-utilities\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.230823 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-catalog-content\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.230849 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-catalog-content\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.230893 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.231154 4948 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.73114679 +0000 UTC m=+111.681871749 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.231808 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-utilities\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.232073 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-catalog-content\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.331556 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.331695 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.831668366 +0000 UTC m=+111.782393335 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.331808 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-catalog-content\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.331902 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.331980 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-utilities\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.332056 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mk4wx\" (UniqueName: \"kubernetes.io/projected/a443e18f-462b-4c81-9f70-3bae303f278f-kube-api-access-mk4wx\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.332236 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-catalog-content\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.332250 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.832232881 +0000 UTC m=+111.782957850 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.332372 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-utilities\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.354557 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2hcgj"] Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.355564 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.377623 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fpw4g"] Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.378765 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.380546 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6cqcg" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.432802 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.432878 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-utilities\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.432973 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-utilities\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.433048 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:23.933031904 +0000 UTC m=+111.883756873 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.433075 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvz5r\" (UniqueName: \"kubernetes.io/projected/aa1c9624-c789-4df8-8c32-eb95e7c40690-kube-api-access-hvz5r\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.433100 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-catalog-content\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.433125 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-catalog-content\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.433146 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v87x\" (UniqueName: \"kubernetes.io/projected/0235a2ef-a094-4747-8aa5-581cb5f665a2-kube-api-access-8v87x\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.447097 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4k45\" (UniqueName: \"kubernetes.io/projected/4e87b4cc-edb1-4541-aff1-83012069d55c-kube-api-access-h4k45\") pod \"community-operators-4l26k\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.450489 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.508403 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:23 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:23 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:23 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.508454 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.525131 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mk4wx\" (UniqueName: \"kubernetes.io/projected/a443e18f-462b-4c81-9f70-3bae303f278f-kube-api-access-mk4wx\") pod \"certified-operators-m7lf9\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.539429 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-utilities\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.539491 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.539564 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-utilities\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.539614 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvz5r\" (UniqueName: \"kubernetes.io/projected/aa1c9624-c789-4df8-8c32-eb95e7c40690-kube-api-access-hvz5r\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.539645 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-catalog-content\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.539677 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-catalog-content\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.539722 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v87x\" (UniqueName: \"kubernetes.io/projected/0235a2ef-a094-4747-8aa5-581cb5f665a2-kube-api-access-8v87x\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.540780 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-utilities\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.541047 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:24.041036265 +0000 UTC m=+111.991761234 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.541339 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-utilities\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.541676 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-catalog-content\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.542162 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-catalog-content\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.552192 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2hcgj"] Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.644753 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 
19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.644997 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:24.144984145 +0000 UTC m=+112.095709114 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.745580 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.746133 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.746370 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:24.246358174 +0000 UTC m=+112.197083143 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.882014 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.884080 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:24.384058479 +0000 UTC m=+112.334783448 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.945227 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fpw4g"] Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.975761 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v87x\" (UniqueName: \"kubernetes.io/projected/0235a2ef-a094-4747-8aa5-581cb5f665a2-kube-api-access-8v87x\") pod \"certified-operators-fpw4g\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:23 crc kubenswrapper[4948]: I0120 19:51:23.985290 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:23 crc kubenswrapper[4948]: E0120 19:51:23.985576 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:24.485565362 +0000 UTC m=+112.436290331 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.015147 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvz5r\" (UniqueName: \"kubernetes.io/projected/aa1c9624-c789-4df8-8c32-eb95e7c40690-kube-api-access-hvz5r\") pod \"community-operators-2hcgj\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") " pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.060080 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.101373 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.101724 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:24.601690156 +0000 UTC m=+112.552415125 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.227536 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.227948 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:24.727935617 +0000 UTC m=+112.678660586 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.272783 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.343607 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.344309 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-20 19:51:24.844294427 +0000 UTC m=+112.795019396 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.435217 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.435850 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.444876 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.445081 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.445530 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.445947 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:24.945930293 +0000 UTC m=+112.896655272 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.557568 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.557948 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.057932184 +0000 UTC m=+113.008657153 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.598596 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:24 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:24 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:24 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.598648 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.611458 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.672084 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da9cc268-da04-4b8a-a9ff-217fa3377832-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"da9cc268-da04-4b8a-a9ff-217fa3377832\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.672121 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da9cc268-da04-4b8a-a9ff-217fa3377832-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"da9cc268-da04-4b8a-a9ff-217fa3377832\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.672189 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.672457 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.172446133 +0000 UTC m=+113.123171102 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.755049 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" event={"ID":"c05cd5ea-b0a0-4314-9676-199d2f7edd7c","Type":"ContainerStarted","Data":"4c55b54932a8d2d1469a900a46da12967976423c50c313fab603f4478b51d512"} Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.788728 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.788869 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da9cc268-da04-4b8a-a9ff-217fa3377832-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"da9cc268-da04-4b8a-a9ff-217fa3377832\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.788999 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da9cc268-da04-4b8a-a9ff-217fa3377832-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"da9cc268-da04-4b8a-a9ff-217fa3377832\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.789072 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.28905489 +0000 UTC m=+113.239779859 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.789070 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da9cc268-da04-4b8a-a9ff-217fa3377832-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"da9cc268-da04-4b8a-a9ff-217fa3377832\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.892016 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.892414 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.392398723 +0000 UTC m=+113.343123692 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:24 crc kubenswrapper[4948]: I0120 19:51:24.993653 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:24 crc kubenswrapper[4948]: E0120 19:51:24.994022 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.494006699 +0000 UTC m=+113.444731668 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:24.997991 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da9cc268-da04-4b8a-a9ff-217fa3377832-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"da9cc268-da04-4b8a-a9ff-217fa3377832\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.148691 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:25 crc kubenswrapper[4948]: E0120 19:51:25.149046 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.649033429 +0000 UTC m=+113.599758398 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.160694 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.249485 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:25 crc kubenswrapper[4948]: E0120 19:51:25.249776 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.7497619 +0000 UTC m=+113.700486869 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.318271 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lzft6"] Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.319357 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.327351 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.353391 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:25 crc kubenswrapper[4948]: E0120 19:51:25.353821 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.853809203 +0000 UTC m=+113.804534182 (durationBeforeRetry 500ms). 
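Every mount and unmount attempt above fails the same way: kubelet cannot find kubevirt.io.hostpath-provisioner among its registered CSI drivers. A driver lands on that list by exposing a registration socket in kubelet's plugin-registration directory, conventionally /var/lib/kubelet/plugins_registry (an assumption here; kubelet's --root-dir flag can relocate it). A small sketch that lists whatever plugin sockets are currently registered on a node; an empty listing is consistent with the errors above.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const regDir = "/var/lib/kubelet/plugins_registry" // assumed default layout
	entries, err := os.ReadDir(regDir)
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot read registration dir:", err)
		os.Exit(1)
	}
	for _, e := range entries {
		info, err := e.Info()
		if err != nil {
			continue
		}
		// CSI drivers register via unix sockets watched by kubelet.
		if info.Mode()&os.ModeSocket != 0 {
			fmt.Println("registered plugin socket:", filepath.Join(regDir, e.Name()))
		}
	}
}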
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.471433 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.471563 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-utilities\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.471682 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-catalog-content\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.471730 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8v99\" (UniqueName: \"kubernetes.io/projected/2dc4a3ea-7198-4d3c-a592-7734d229d481-kube-api-access-l8v99\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: E0120 19:51:25.471840 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:25.971822909 +0000 UTC m=+113.922547878 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.525224 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:25 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:25 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:25 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.525275 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.574100 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-utilities\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.574474 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.574533 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-catalog-content\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.574573 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8v99\" (UniqueName: \"kubernetes.io/projected/2dc4a3ea-7198-4d3c-a592-7734d229d481-kube-api-access-l8v99\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: E0120 19:51:25.575406 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:26.075391328 +0000 UTC m=+114.026116297 (durationBeforeRetry 500ms). 
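The router startup probe above fails with HTTP 500, and the prober keeps only the start of the response body, which is why the [-]backend-http / [-]has-synced / [+]process-running sub-check lines appear interleaved in the log. A rough probe-shaped check in the same spirit: GET the endpoint, succeed only on 2xx, and on failure report a bounded prefix of the body. The URL is a placeholder, not taken from the log.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: 2 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe error: %w", err) // e.g. connection refused
	}
	defer resp.Body.Close()
	// Keep only the start of the body, as the kubelet prober does.
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 256))
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d\n%s", resp.StatusCode, body)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:1936/healthz"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}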
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.575784 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-catalog-content\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.597446 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lzft6"] Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.598550 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-utilities\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.625679 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rlfcl"] Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.626721 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.659289 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8v99\" (UniqueName: \"kubernetes.io/projected/2dc4a3ea-7198-4d3c-a592-7734d229d481-kube-api-access-l8v99\") pod \"redhat-marketplace-lzft6\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.694963 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:25 crc kubenswrapper[4948]: E0120 19:51:25.695304 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:26.195288144 +0000 UTC m=+114.146013113 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.774827 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlfcl"] Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.797492 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-utilities\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.797568 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.797647 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bc6k\" (UniqueName: \"kubernetes.io/projected/4c19381d-95b1-4813-8625-da98f07c486f-kube-api-access-6bc6k\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.797688 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-catalog-content\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:25 crc kubenswrapper[4948]: E0120 19:51:25.817083 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:26.317060023 +0000 UTC m=+114.267784992 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.859870 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.900314 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.900896 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-utilities\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.901001 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bc6k\" (UniqueName: \"kubernetes.io/projected/4c19381d-95b1-4813-8625-da98f07c486f-kube-api-access-6bc6k\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.901037 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-catalog-content\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:25 crc kubenswrapper[4948]: E0120 19:51:25.904916 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:26.40489306 +0000 UTC m=+114.355618029 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.916988 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-catalog-content\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:25 crc kubenswrapper[4948]: I0120 19:51:25.977352 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-utilities\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.005466 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:26 crc kubenswrapper[4948]: E0120 19:51:26.006085 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:26.506072984 +0000 UTC m=+114.456797953 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.008667 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-flwsw"] Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.025266 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.036549 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.050073 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.079536 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bc6k\" (UniqueName: \"kubernetes.io/projected/4c19381d-95b1-4813-8625-da98f07c486f-kube-api-access-6bc6k\") pod \"redhat-marketplace-rlfcl\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") " pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.174003 4948 patch_prober.go:28] interesting pod/console-f9d7485db-lxvjj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.18:8443/health\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body= Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.174089 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lxvjj" podUID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.18:8443/health\": dial tcp 10.217.0.18:8443: connect: connection refused" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.176934 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.177411 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-catalog-content\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.178203 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-utilities\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.178244 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvx6q\" (UniqueName: \"kubernetes.io/projected/b73db843-a550-4d8e-8aa1-0d6ce047cefe-kube-api-access-lvx6q\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: E0120 19:51:26.178621 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-20 19:51:26.678599374 +0000 UTC m=+114.629324343 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.179249 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-flwsw"] Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.181003 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-k2czh" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.279798 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-catalog-content\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.279846 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.279916 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-utilities\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.279929 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvx6q\" (UniqueName: \"kubernetes.io/projected/b73db843-a550-4d8e-8aa1-0d6ce047cefe-kube-api-access-lvx6q\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: E0120 19:51:26.280387 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:26.780377464 +0000 UTC m=+114.731102433 (durationBeforeRetry 500ms). 
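The retry deadlines above are printed with Go's default time.Time formatting, so the trailing "m=+114.629324343" is a monotonic-clock reading in seconds since kubelet start. Differencing two of those readings gives the spacing between deadlines without parsing any wall-clock fields. The two sample strings below are copied verbatim from entries above.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// monoSeconds extracts the monotonic reading from a Go-formatted
// timestamp such as "... +0000 UTC m=+114.629324343".
func monoSeconds(ts string) (float64, error) {
	i := strings.LastIndex(ts, "m=+")
	if i < 0 {
		return 0, fmt.Errorf("no monotonic reading in %q", ts)
	}
	return strconv.ParseFloat(ts[i+len("m=+"):], 64)
}

func main() {
	a, _ := monoSeconds("2026-01-20 19:51:26.195288144 +0000 UTC m=+114.146013113")
	b, _ := monoSeconds("2026-01-20 19:51:26.678599374 +0000 UTC m=+114.629324343")
	// Prints roughly 0.483s: two separate operations' deadlines, each
	// set 500ms after its own failure.
	fmt.Printf("spacing between the two retry deadlines: %.3fs\n", b-a)
}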
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.280396 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-catalog-content\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.280689 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-utilities\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.313692 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bslf8"] Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.314777 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.458938 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:26 crc kubenswrapper[4948]: E0120 19:51:26.459356 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:26.95933402 +0000 UTC m=+114.910058989 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.465363 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvx6q\" (UniqueName: \"kubernetes.io/projected/b73db843-a550-4d8e-8aa1-0d6ce047cefe-kube-api-access-lvx6q\") pod \"redhat-operators-flwsw\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.466560 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rlfcl" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.498198 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.521918 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:26 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:26 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:26 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.522338 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.582761 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtfgl\" (UniqueName: \"kubernetes.io/projected/31d44844-4319-4456-b6cc-88135734f548-kube-api-access-gtfgl\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.582831 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.582853 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-catalog-content\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.582899 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-utilities\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: E0120 19:51:26.583521 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:27.083509455 +0000 UTC m=+115.034234424 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.706282 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.706517 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtfgl\" (UniqueName: \"kubernetes.io/projected/31d44844-4319-4456-b6cc-88135734f548-kube-api-access-gtfgl\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.706581 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-catalog-content\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.706616 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-utilities\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.707267 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-utilities\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.715451 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-catalog-content\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:26 crc kubenswrapper[4948]: E0120 19:51:26.715567 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:27.215528054 +0000 UTC m=+115.166253023 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.766347 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" event={"ID":"c05cd5ea-b0a0-4314-9676-199d2f7edd7c","Type":"ContainerStarted","Data":"f2264f9a13c2bc54a98474cf4459f7f60fe2da30d78892b3ec5c62fc160a8b87"} Jan 20 19:51:26 crc kubenswrapper[4948]: I0120 19:51:26.939685 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:26 crc kubenswrapper[4948]: E0120 19:51:26.939978 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:27.439967567 +0000 UTC m=+115.390692536 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.045440 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.045777 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:27.545690536 +0000 UTC m=+115.496415505 (durationBeforeRetry 500ms). 
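The ContainerStarted event for csi-hostpathplugin-pkc9x above is the other half of the story: the mount and unmount retries keep failing only until this plugin pod comes up and registers the driver, at which point the driver name resolves and the parked operations can proceed. A sketch of waiting for a driver's registration socket to appear; the socket path follows the usual node-driver-registrar naming convention and is an assumption, not something read from the log.

package main

import (
	"fmt"
	"os"
	"time"
)

// waitForSocket polls until the path exists as a unix socket or the
// timeout elapses.
func waitForSocket(path string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if info, err := os.Stat(path); err == nil && info.Mode()&os.ModeSocket != 0 {
			return nil
		}
		time.Sleep(500 * time.Millisecond) // matches the retry cadence in the log
	}
	return fmt.Errorf("timed out waiting for %s", path)
}

func main() {
	sock := "/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
	if err := waitForSocket(sock, 2*time.Minute); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("driver registered:", sock)
}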
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.167478 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.167933 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:27.667918556 +0000 UTC m=+115.618643525 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.203594 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtfgl\" (UniqueName: \"kubernetes.io/projected/31d44844-4319-4456-b6cc-88135734f548-kube-api-access-gtfgl\") pod \"redhat-operators-bslf8\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") " pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.340935 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.341602 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:27.841586928 +0000 UTC m=+115.792311897 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.407580 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bslf8" Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.442781 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.443142 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:27.943127131 +0000 UTC m=+115.893852100 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.520512 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bslf8"] Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.543902 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.544046 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:28.044021447 +0000 UTC m=+115.994746416 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.544153 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.544474 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-20 19:51:28.044465049 +0000 UTC m=+115.995190018 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.573584 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.573632 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.574258 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.574274 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.585527 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:27 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:27 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:27 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.585586 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.664173 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.667313 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
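Unlike the router's HTTP 500, the downloads pod probes above fail with "connect: connection refused", that is, at the TCP layer before any HTTP exchange happens, which is why both the readiness and the liveness probe report the identical error. A dial-level check makes that distinction explicit; the address is the one from the log entry above.

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "10.217.0.11:8080", 2*time.Second)
	if err != nil {
		// Covers connection refused, timeouts, unreachable routes:
		// nothing is listening yet, so no HTTP status is available.
		fmt.Println("probe failed before HTTP:", err)
		return
	}
	conn.Close()
	fmt.Println("TCP connect ok; an HTTP-level check could now proceed")
}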
No retries permitted until 2026-01-20 19:51:28.167276567 +0000 UTC m=+116.118001536 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.766578 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.767046 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:28.267031121 +0000 UTC m=+116.217756090 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.810243 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4l26k"] Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.868668 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.869273 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:28.369251864 +0000 UTC m=+116.319976833 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:27 crc kubenswrapper[4948]: I0120 19:51:27.976272 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:27 crc kubenswrapper[4948]: E0120 19:51:27.976595 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:28.476583546 +0000 UTC m=+116.427308515 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.093811 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.094140 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:28.594125348 +0000 UTC m=+116.544850317 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.198569 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.198924 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:28.698913691 +0000 UTC m=+116.649638660 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.262318 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-5svhh" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.301412 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.301740 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:28.80172343 +0000 UTC m=+116.752448399 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.402930 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.404103 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:28.904089956 +0000 UTC m=+116.854814915 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.504341 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.504770 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.004754266 +0000 UTC m=+116.955479225 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.508038 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:28 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:28 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:28 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.508120 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.580225 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2hcgj"] Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.605614 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.605954 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.10593863 +0000 UTC m=+117.056663599 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.606432 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fpw4g"] Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.707749 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.708500 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.208480451 +0000 UTC m=+117.159205420 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.711688 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.712575 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.735823 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.735963 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.755130 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.787859 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"] Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.788083 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" podUID="21157116-8790-4342-ba0d-e356baad7ae1" containerName="route-controller-manager" containerID="cri-o://3719c0e71f9240fa1325a50866f37766f7e6d0a426cdf00678035e77268df85c" gracePeriod=30 Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.813854 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d6d7392e-b25f-4d82-91e0-a623842c5953-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d6d7392e-b25f-4d82-91e0-a623842c5953\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.813962 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.813993 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d6d7392e-b25f-4d82-91e0-a623842c5953-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d6d7392e-b25f-4d82-91e0-a623842c5953\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.814310 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.314298622 +0000 UTC m=+117.265023591 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.866024 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" event={"ID":"c05cd5ea-b0a0-4314-9676-199d2f7edd7c","Type":"ContainerStarted","Data":"0dd5bc6b16da8de26a0aebe0d485da7c9317f972f535587e2ad590a3b4015b64"} Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.893984 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fpw4g" event={"ID":"0235a2ef-a094-4747-8aa5-581cb5f665a2","Type":"ContainerStarted","Data":"a8adec5b2359f950454153a734f1b42c202274e8dd4d6e40699eec012d1841ca"} Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.908583 4948 generic.go:334] "Generic (PLEG): container finished" podID="4e87b4cc-edb1-4541-aff1-83012069d55c" containerID="d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e" exitCode=0 Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.908661 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4l26k" event={"ID":"4e87b4cc-edb1-4541-aff1-83012069d55c","Type":"ContainerDied","Data":"d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e"} Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.908690 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4l26k" event={"ID":"4e87b4cc-edb1-4541-aff1-83012069d55c","Type":"ContainerStarted","Data":"7aa2ede1634ac35be7f36c7e80da7ab008dab510bc76fd9bdcae0d6ab2edea23"} Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.917497 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.917666 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d6d7392e-b25f-4d82-91e0-a623842c5953-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d6d7392e-b25f-4d82-91e0-a623842c5953\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.917715 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.917757 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d6d7392e-b25f-4d82-91e0-a623842c5953-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d6d7392e-b25f-4d82-91e0-a623842c5953\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.917833 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/d6d7392e-b25f-4d82-91e0-a623842c5953-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d6d7392e-b25f-4d82-91e0-a623842c5953\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 19:51:28 crc kubenswrapper[4948]: E0120 19:51:28.917899 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.417886142 +0000 UTC m=+117.368611111 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.922171 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2hcgj" event={"ID":"aa1c9624-c789-4df8-8c32-eb95e7c40690","Type":"ContainerStarted","Data":"87073af38e2238e60ce135e7404510b7ddda43a21dc55b4e7adf10457c96e76f"} Jan 20 19:51:28 crc kubenswrapper[4948]: I0120 19:51:28.977456 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d6d7392e-b25f-4d82-91e0-a623842c5953-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d6d7392e-b25f-4d82-91e0-a623842c5953\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.008366 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-pkc9x" podStartSLOduration=24.008348552 podStartE2EDuration="24.008348552s" podCreationTimestamp="2026-01-20 19:51:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:28.939764182 +0000 UTC m=+116.890489161" watchObservedRunningTime="2026-01-20 19:51:29.008348552 +0000 UTC m=+116.959073521" Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.019133 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.020671 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.52065666 +0000 UTC m=+117.471381629 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.037071 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.067511 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-m7lf9"] Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.086189 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.119850 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.120128 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.620113926 +0000 UTC m=+117.570838885 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.203110 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bslf8"] Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.228563 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.228986 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.728970661 +0000 UTC m=+117.679695630 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: W0120 19:51:29.251434 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31d44844_4319_4456_b6cc_88135734f548.slice/crio-272d5887154707aaae1ab5da235f320672d4d8739945b612ffaeb8a735869c50 WatchSource:0}: Error finding container 272d5887154707aaae1ab5da235f320672d4d8739945b612ffaeb8a735869c50: Status 404 returned error can't find the container with id 272d5887154707aaae1ab5da235f320672d4d8739945b612ffaeb8a735869c50 Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.339476 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.340423 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.840399025 +0000 UTC m=+117.791123994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.441182 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.441496 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:29.941483817 +0000 UTC m=+117.892208786 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.454758 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b9nsx"] Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.455152 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" podUID="c22d8773-24ca-45ba-95b2-375bb9ccc6bb" containerName="controller-manager" containerID="cri-o://2ea83b3ba47b15b86978e3b6f1fe7d9be80fa6215281bdf3ca10c701c717a4df" gracePeriod=30 Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.520322 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lzft6"] Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.548624 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.549346 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.049326442 +0000 UTC m=+118.000051421 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.550511 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:29 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:29 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:29 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.550545 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.650894 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.651327 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.151313218 +0000 UTC m=+118.102038187 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.656974 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-wzh2f" Jan 20 19:51:29 crc kubenswrapper[4948]: W0120 19:51:29.722594 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2dc4a3ea_7198_4d3c_a592_7734d229d481.slice/crio-a8e545883330fe15952d5347da65f706486ac70cf1e7c82b60d322486f2bee73 WatchSource:0}: Error finding container a8e545883330fe15952d5347da65f706486ac70cf1e7c82b60d322486f2bee73: Status 404 returned error can't find the container with id a8e545883330fe15952d5347da65f706486ac70cf1e7c82b60d322486f2bee73 Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.756234 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.756992 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.256957565 +0000 UTC m=+118.207682534 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.757842 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.758223 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.258210879 +0000 UTC m=+118.208935848 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.860221 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.860657 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.360641247 +0000 UTC m=+118.311366206 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.961407 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:29 crc kubenswrapper[4948]: E0120 19:51:29.961894 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.461865202 +0000 UTC m=+118.412590181 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.973569 4948 generic.go:334] "Generic (PLEG): container finished" podID="21157116-8790-4342-ba0d-e356baad7ae1" containerID="3719c0e71f9240fa1325a50866f37766f7e6d0a426cdf00678035e77268df85c" exitCode=0 Jan 20 19:51:29 crc kubenswrapper[4948]: I0120 19:51:29.973853 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" event={"ID":"21157116-8790-4342-ba0d-e356baad7ae1","Type":"ContainerDied","Data":"3719c0e71f9240fa1325a50866f37766f7e6d0a426cdf00678035e77268df85c"} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.055379 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sxpf7" Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.062720 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:30 crc kubenswrapper[4948]: E0120 19:51:30.063256 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.563233491 +0000 UTC m=+118.513958460 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.135911 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.137724 4948 generic.go:334] "Generic (PLEG): container finished" podID="c22d8773-24ca-45ba-95b2-375bb9ccc6bb" containerID="2ea83b3ba47b15b86978e3b6f1fe7d9be80fa6215281bdf3ca10c701c717a4df" exitCode=0 Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.137889 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" event={"ID":"c22d8773-24ca-45ba-95b2-375bb9ccc6bb","Type":"ContainerDied","Data":"2ea83b3ba47b15b86978e3b6f1fe7d9be80fa6215281bdf3ca10c701c717a4df"} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.150428 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bslf8" event={"ID":"31d44844-4319-4456-b6cc-88135734f548","Type":"ContainerStarted","Data":"272d5887154707aaae1ab5da235f320672d4d8739945b612ffaeb8a735869c50"} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.151433 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-flwsw"] Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.153651 4948 generic.go:334] "Generic (PLEG): container finished" podID="aa1c9624-c789-4df8-8c32-eb95e7c40690" containerID="d2d7dbeba7f7e26b3179720b734d5edd1232b915fcf79577b96868f1c376ae0d" exitCode=0 Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.153696 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2hcgj" event={"ID":"aa1c9624-c789-4df8-8c32-eb95e7c40690","Type":"ContainerDied","Data":"d2d7dbeba7f7e26b3179720b734d5edd1232b915fcf79577b96868f1c376ae0d"} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.164674 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:30 crc kubenswrapper[4948]: E0120 19:51:30.166093 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.666078381 +0000 UTC m=+118.616803350 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.167331 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m7lf9" event={"ID":"a443e18f-462b-4c81-9f70-3bae303f278f","Type":"ContainerStarted","Data":"e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817"} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.167373 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m7lf9" event={"ID":"a443e18f-462b-4c81-9f70-3bae303f278f","Type":"ContainerStarted","Data":"2346d161d11be9382e639a13a4a2ad0347b94fb675f749934d4db9a83ae7815c"} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.212938 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lzft6" event={"ID":"2dc4a3ea-7198-4d3c-a592-7734d229d481","Type":"ContainerStarted","Data":"a8e545883330fe15952d5347da65f706486ac70cf1e7c82b60d322486f2bee73"} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.241005 4948 generic.go:334] "Generic (PLEG): container finished" podID="0235a2ef-a094-4747-8aa5-581cb5f665a2" containerID="1c0bd8a73d68263e8e7b2dc44b49cee342785962a6625b74a5bc48d3b39e6562" exitCode=0 Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.241102 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fpw4g" event={"ID":"0235a2ef-a094-4747-8aa5-581cb5f665a2","Type":"ContainerDied","Data":"1c0bd8a73d68263e8e7b2dc44b49cee342785962a6625b74a5bc48d3b39e6562"} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.250197 4948 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.270490 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:30 crc kubenswrapper[4948]: E0120 19:51:30.271531 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.771502811 +0000 UTC m=+118.722227850 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.321092 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"da9cc268-da04-4b8a-a9ff-217fa3377832","Type":"ContainerStarted","Data":"87539a81ab1616e8f512d3143eb74a3bbb2537699f6bee3e90a6af676aca1a10"} Jan 20 19:51:30 crc kubenswrapper[4948]: W0120 19:51:30.340857 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb73db843_a550_4d8e_8aa1_0d6ce047cefe.slice/crio-3b205c44aebcb92f8d1578ef94f226a9bb35120612b0aba12ce9a7dfdf77dcc0 WatchSource:0}: Error finding container 3b205c44aebcb92f8d1578ef94f226a9bb35120612b0aba12ce9a7dfdf77dcc0: Status 404 returned error can't find the container with id 3b205c44aebcb92f8d1578ef94f226a9bb35120612b0aba12ce9a7dfdf77dcc0 Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.367849 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlfcl"] Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.375893 4948 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-20T19:51:30.250234438Z","Handler":null,"Name":""} Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.377908 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:30 crc kubenswrapper[4948]: E0120 19:51:30.381243 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.881222089 +0000 UTC m=+118.831947118 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.479169 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:30 crc kubenswrapper[4948]: E0120 19:51:30.480060 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-20 19:51:30.980035638 +0000 UTC m=+118.930760607 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.573322 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:30 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:30 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:30 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.573393 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.584861 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:30 crc kubenswrapper[4948]: E0120 19:51:30.585564 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-20 19:51:31.085363536 +0000 UTC m=+119.036088505 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-bwm86" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.604846 4948 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.604929 4948 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.687310 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.724367 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.788786 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.807197 4948 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
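
The repeated MountVolume.MountDevice and UnmountVolume.TearDown failures above are all one race: the kubelet resolves a CSI volume by driver name, and until the node plugin registers over the plugin-registration socket every lookup fails with "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers", with nestedpendingoperations gating each retry behind a 500ms durationBeforeRetry. Once plugin_watcher picks up /var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock at 19:51:30.250 and csi_plugin validates and registers the driver at 19:51:30.604, the pending operations succeed on the next reconciler pass; MountDevice itself then becomes a no-op because the driver does not advertise the STAGE_UNSTAGE_VOLUME capability, so staging is skipped and MountVolume.SetUp (NodePublishVolume) runs directly. What follows is a minimal Go sketch of that retry gating, not kubelet source: the fixed 500ms delay mirrors the "(durationBeforeRetry 500ms)" printed in these entries (the kubelet derives it from an exponential backoff whose initial value is 500ms), and everything except the driver name and the error string is illustrative.

package main

import (
	"errors"
	"fmt"
	"time"
)

// errNotRegistered mirrors the failure printed by every retry above.
var errNotRegistered = errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")

// pendingOperation models the per-volume bookkeeping behind the
// "No retries permitted until ..." lines (nestedpendingoperations.go:348).
type pendingOperation struct {
	lastErrorTime       time.Time
	durationBeforeRetry time.Duration
	failedOnce          bool
}

// retryAllowed reports whether the reconciler may run the operation again.
func (op *pendingOperation) retryAllowed(now time.Time) bool {
	return !op.failedOnce || now.After(op.lastErrorTime.Add(op.durationBeforeRetry))
}

func main() {
	registered := map[string]bool{} // driver name -> seen on the registration socket
	op := &pendingOperation{durationBeforeRetry: 500 * time.Millisecond}

	attempts := 0
	for {
		if !op.retryAllowed(time.Now()) {
			time.Sleep(50 * time.Millisecond) // reconciler tick while backing off
			continue
		}
		attempts++
		if !registered["kubevirt.io.hostpath-provisioner"] {
			op.failedOnce = true
			op.lastErrorTime = time.Now()
			fmt.Printf("attempt %d: %v; no retries permitted until %s\n",
				attempts, errNotRegistered,
				op.lastErrorTime.Add(op.durationBeforeRetry).Format("15:04:05.000"))
			if attempts == 3 {
				// Simulate plugin_watcher + csi_plugin finishing driver
				// registration while the operation is backing off.
				registered["kubevirt.io.hostpath-provisioner"] = true
			}
			continue
		}
		fmt.Printf("attempt %d: MountVolume succeeded\n", attempts)
		return
	}
}

Observable in the capture: registration lands at 19:51:30.604, the very next reconciler pass (19:51:30.687) completes the TearDown at 19:51:30.724, and the mount for the replacement image-registry pod finishes SetUp at 19:51:31.362.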
Jan 20 19:51:30 crc kubenswrapper[4948]: I0120 19:51:30.807243 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.160323 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.362568 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-bwm86\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.380109 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.387463 4948 generic.go:334] "Generic (PLEG): container finished" podID="a443e18f-462b-4c81-9f70-3bae303f278f" containerID="e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817" exitCode=0 Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.387626 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m7lf9" event={"ID":"a443e18f-462b-4c81-9f70-3bae303f278f","Type":"ContainerDied","Data":"e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.428073 4948 generic.go:334] "Generic (PLEG): container finished" podID="2dc4a3ea-7198-4d3c-a592-7734d229d481" containerID="1ab669a3f8b548dca77f3f93943091b7d6cfea5254e61b0f5f144617eeefdd6f" exitCode=0 Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.428191 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lzft6" event={"ID":"2dc4a3ea-7198-4d3c-a592-7734d229d481","Type":"ContainerDied","Data":"1ab669a3f8b548dca77f3f93943091b7d6cfea5254e61b0f5f144617eeefdd6f"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.472747 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.489115 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"d6d7392e-b25f-4d82-91e0-a623842c5953","Type":"ContainerStarted","Data":"dbcbf253c7129e930521b473f8cd327d7000e5314b8ce7c20068538c0a5425d1"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.542346 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:31 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:31 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:31 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.542789 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.546176 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.547744 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-client-ca\") pod \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.547794 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21157116-8790-4342-ba0d-e356baad7ae1-serving-cert\") pod \"21157116-8790-4342-ba0d-e356baad7ae1\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.547823 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-serving-cert\") pod \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.547846 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmhsr\" (UniqueName: \"kubernetes.io/projected/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-kube-api-access-bmhsr\") pod \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.547867 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-client-ca\") pod \"21157116-8790-4342-ba0d-e356baad7ae1\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.547908 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-proxy-ca-bundles\") pod \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\" (UID: 
\"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.547930 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-config\") pod \"21157116-8790-4342-ba0d-e356baad7ae1\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.547960 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-config\") pod \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\" (UID: \"c22d8773-24ca-45ba-95b2-375bb9ccc6bb\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.550222 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-client-ca" (OuterVolumeSpecName: "client-ca") pod "21157116-8790-4342-ba0d-e356baad7ae1" (UID: "21157116-8790-4342-ba0d-e356baad7ae1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.570051 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-client-ca" (OuterVolumeSpecName: "client-ca") pod "c22d8773-24ca-45ba-95b2-375bb9ccc6bb" (UID: "c22d8773-24ca-45ba-95b2-375bb9ccc6bb"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.571449 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c22d8773-24ca-45ba-95b2-375bb9ccc6bb" (UID: "c22d8773-24ca-45ba-95b2-375bb9ccc6bb"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.602642 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c22d8773-24ca-45ba-95b2-375bb9ccc6bb" (UID: "c22d8773-24ca-45ba-95b2-375bb9ccc6bb"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.611112 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" event={"ID":"c22d8773-24ca-45ba-95b2-375bb9ccc6bb","Type":"ContainerDied","Data":"0f120ebd3be471a6e842b191a142ca11ce8934534eea857340af169658813ea2"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.614773 4948 scope.go:117] "RemoveContainer" containerID="2ea83b3ba47b15b86978e3b6f1fe7d9be80fa6215281bdf3ca10c701c717a4df" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.615101 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-b9nsx" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.630926 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-config" (OuterVolumeSpecName: "config") pod "21157116-8790-4342-ba0d-e356baad7ae1" (UID: "21157116-8790-4342-ba0d-e356baad7ae1"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.631260 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-config" (OuterVolumeSpecName: "config") pod "c22d8773-24ca-45ba-95b2-375bb9ccc6bb" (UID: "c22d8773-24ca-45ba-95b2-375bb9ccc6bb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.631751 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-kube-api-access-bmhsr" (OuterVolumeSpecName: "kube-api-access-bmhsr") pod "c22d8773-24ca-45ba-95b2-375bb9ccc6bb" (UID: "c22d8773-24ca-45ba-95b2-375bb9ccc6bb"). InnerVolumeSpecName "kube-api-access-bmhsr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.632138 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21157116-8790-4342-ba0d-e356baad7ae1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "21157116-8790-4342-ba0d-e356baad7ae1" (UID: "21157116-8790-4342-ba0d-e356baad7ae1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.648751 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsfg6\" (UniqueName: \"kubernetes.io/projected/21157116-8790-4342-ba0d-e356baad7ae1-kube-api-access-rsfg6\") pod \"21157116-8790-4342-ba0d-e356baad7ae1\" (UID: \"21157116-8790-4342-ba0d-e356baad7ae1\") " Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.649276 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/21157116-8790-4342-ba0d-e356baad7ae1-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.649304 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.649316 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.649328 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmhsr\" (UniqueName: \"kubernetes.io/projected/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-kube-api-access-bmhsr\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.649339 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.649368 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21157116-8790-4342-ba0d-e356baad7ae1-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.649379 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-config\") on node 
\"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.649391 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c22d8773-24ca-45ba-95b2-375bb9ccc6bb-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.756234 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21157116-8790-4342-ba0d-e356baad7ae1-kube-api-access-rsfg6" (OuterVolumeSpecName: "kube-api-access-rsfg6") pod "21157116-8790-4342-ba0d-e356baad7ae1" (UID: "21157116-8790-4342-ba0d-e356baad7ae1"). InnerVolumeSpecName "kube-api-access-rsfg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.774830 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"da9cc268-da04-4b8a-a9ff-217fa3377832","Type":"ContainerStarted","Data":"edbbd5ff82f271c4edd147f387d6899ce9eeea04440be0d4f91cc1f4d81541ca"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.802661 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlfcl" event={"ID":"4c19381d-95b1-4813-8625-da98f07c486f","Type":"ContainerStarted","Data":"5df219bcf3bf34ace0059c10bcf5c1b860d2c58a0b94c73a3b88bb626fb0d4ed"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.802717 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlfcl" event={"ID":"4c19381d-95b1-4813-8625-da98f07c486f","Type":"ContainerStarted","Data":"2142dac462589be407d179441d186027072d6c86e46c2d2e1bef177fd730a575"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.810857 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=7.810812242 podStartE2EDuration="7.810812242s" podCreationTimestamp="2026-01-20 19:51:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:31.808141528 +0000 UTC m=+119.758866497" watchObservedRunningTime="2026-01-20 19:51:31.810812242 +0000 UTC m=+119.761537211" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.810978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bslf8" event={"ID":"31d44844-4319-4456-b6cc-88135734f548","Type":"ContainerDied","Data":"0ac19e29261806836443b8a565fb019d18ec78f44ab11da9f1aff47b7c84650a"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.810887 4948 generic.go:334] "Generic (PLEG): container finished" podID="31d44844-4319-4456-b6cc-88135734f548" containerID="0ac19e29261806836443b8a565fb019d18ec78f44ab11da9f1aff47b7c84650a" exitCode=0 Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.845095 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flwsw" event={"ID":"b73db843-a550-4d8e-8aa1-0d6ce047cefe","Type":"ContainerStarted","Data":"defb5cb985994e8f6c63ae9d8ae05aaa0ee2d3b1d2e5cdecba1f00f2df3ffcd5"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.845239 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flwsw" event={"ID":"b73db843-a550-4d8e-8aa1-0d6ce047cefe","Type":"ContainerStarted","Data":"3b205c44aebcb92f8d1578ef94f226a9bb35120612b0aba12ce9a7dfdf77dcc0"} Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 
19:51:31.856242 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsfg6\" (UniqueName: \"kubernetes.io/projected/21157116-8790-4342-ba0d-e356baad7ae1-kube-api-access-rsfg6\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.978178 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b9nsx"] Jan 20 19:51:31 crc kubenswrapper[4948]: I0120 19:51:31.992138 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-b9nsx"] Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.391442 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827"] Jan 20 19:51:32 crc kubenswrapper[4948]: E0120 19:51:32.391750 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21157116-8790-4342-ba0d-e356baad7ae1" containerName="route-controller-manager" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.391766 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="21157116-8790-4342-ba0d-e356baad7ae1" containerName="route-controller-manager" Jan 20 19:51:32 crc kubenswrapper[4948]: E0120 19:51:32.391787 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c22d8773-24ca-45ba-95b2-375bb9ccc6bb" containerName="controller-manager" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.391795 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c22d8773-24ca-45ba-95b2-375bb9ccc6bb" containerName="controller-manager" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.391962 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="21157116-8790-4342-ba0d-e356baad7ae1" containerName="route-controller-manager" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.391985 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c22d8773-24ca-45ba-95b2-375bb9ccc6bb" containerName="controller-manager" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.393623 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.396659 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.401992 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.402684 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.402889 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.403072 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.403202 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.420374 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827"] Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.466150 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.531549 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:32 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:32 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:32 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.532044 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.600077 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-proxy-ca-bundles\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.600168 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-config\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.600202 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-client-ca\") pod 
\"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.600221 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-serving-cert\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.600333 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnsbw\" (UniqueName: \"kubernetes.io/projected/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-kube-api-access-hnsbw\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.718640 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.729779 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-proxy-ca-bundles\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.729988 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-config\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.730131 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-client-ca\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.730207 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-serving-cert\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.730327 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnsbw\" (UniqueName: \"kubernetes.io/projected/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-kube-api-access-hnsbw\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.731314 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="c22d8773-24ca-45ba-95b2-375bb9ccc6bb" path="/var/lib/kubelet/pods/c22d8773-24ca-45ba-95b2-375bb9ccc6bb/volumes" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.766489 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.769655 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.774215 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.777018 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-client-ca\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.941276 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.943000 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-config\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.948487 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-proxy-ca-bundles\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:32 crc kubenswrapper[4948]: I0120 19:51:32.952095 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-serving-cert\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.006203 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.012155 4948 generic.go:334] "Generic (PLEG): container finished" podID="b73db843-a550-4d8e-8aa1-0d6ce047cefe" containerID="defb5cb985994e8f6c63ae9d8ae05aaa0ee2d3b1d2e5cdecba1f00f2df3ffcd5" exitCode=0 Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.012232 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flwsw" event={"ID":"b73db843-a550-4d8e-8aa1-0d6ce047cefe","Type":"ContainerDied","Data":"defb5cb985994e8f6c63ae9d8ae05aaa0ee2d3b1d2e5cdecba1f00f2df3ffcd5"} Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.016056 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.044500 4948 generic.go:334] "Generic (PLEG): container finished" 
podID="da9cc268-da04-4b8a-a9ff-217fa3377832" containerID="edbbd5ff82f271c4edd147f387d6899ce9eeea04440be0d4f91cc1f4d81541ca" exitCode=0 Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.044560 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"da9cc268-da04-4b8a-a9ff-217fa3377832","Type":"ContainerDied","Data":"edbbd5ff82f271c4edd147f387d6899ce9eeea04440be0d4f91cc1f4d81541ca"} Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.061761 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnsbw\" (UniqueName: \"kubernetes.io/projected/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-kube-api-access-hnsbw\") pod \"controller-manager-6ddcd9b6f7-vw827\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.076401 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.076906 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.137174 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" event={"ID":"21157116-8790-4342-ba0d-e356baad7ae1","Type":"ContainerDied","Data":"168ce56662bbbbce72996d545dec4d711bc62bdf444606e3eda248c2859baaf1"} Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.137240 4948 scope.go:117] "RemoveContainer" containerID="3719c0e71f9240fa1325a50866f37766f7e6d0a426cdf00678035e77268df85c" Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.137376 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j" Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.172064 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bwm86"] Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.174498 4948 generic.go:334] "Generic (PLEG): container finished" podID="4c19381d-95b1-4813-8625-da98f07c486f" containerID="5df219bcf3bf34ace0059c10bcf5c1b860d2c58a0b94c73a3b88bb626fb0d4ed" exitCode=0 Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.174569 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlfcl" event={"ID":"4c19381d-95b1-4813-8625-da98f07c486f","Type":"ContainerDied","Data":"5df219bcf3bf34ace0059c10bcf5c1b860d2c58a0b94c73a3b88bb626fb0d4ed"} Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.194437 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"] Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.204679 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-ltp2j"] Jan 20 19:51:33 crc kubenswrapper[4948]: W0120 19:51:33.338442 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd9173bf0_5a37_423e_94e7_7496bd69f2ee.slice/crio-0a3370b3da01f40da79f4717b7cec1b307052ec393d94db366758841905ec6c0 WatchSource:0}: Error finding container 0a3370b3da01f40da79f4717b7cec1b307052ec393d94db366758841905ec6c0: Status 404 returned error can't find the container with id 0a3370b3da01f40da79f4717b7cec1b307052ec393d94db366758841905ec6c0 Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.542184 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 20 19:51:33 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld Jan 20 19:51:33 crc kubenswrapper[4948]: [+]process-running ok Jan 20 19:51:33 crc kubenswrapper[4948]: healthz check failed Jan 20 19:51:33 crc kubenswrapper[4948]: I0120 19:51:33.542266 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.398899 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"] Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.400234 4948 util.go:30] "No sandbox for pod can be found. 
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.403338 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.407892 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.408095 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.408250 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.408419 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.408617 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.428391 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"]
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.509804 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 19:51:34 crc kubenswrapper[4948]: [-]has-synced failed: reason withheld
Jan 20 19:51:34 crc kubenswrapper[4948]: [+]process-running ok
Jan 20 19:51:34 crc kubenswrapper[4948]: healthz check failed
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.509879 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.509826 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"d6d7392e-b25f-4d82-91e0-a623842c5953","Type":"ContainerStarted","Data":"5193aced6a453e4d2fecd1e944e771489dba6db5bfdace8ca729d987172c4cf6"}
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.562029 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m42jx\" (UniqueName: \"kubernetes.io/projected/6fb12391-143f-44f4-93a4-503c539581bd-kube-api-access-m42jx\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.562161 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-config\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.562232 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-client-ca\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.562258 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fb12391-143f-44f4-93a4-503c539581bd-serving-cert\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.620164 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=6.620129299 podStartE2EDuration="6.620129299s" podCreationTimestamp="2026-01-20 19:51:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:34.599676208 +0000 UTC m=+122.550401187" watchObservedRunningTime="2026-01-20 19:51:34.620129299 +0000 UTC m=+122.570854268"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.658361 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21157116-8790-4342-ba0d-e356baad7ae1" path="/var/lib/kubelet/pods/21157116-8790-4342-ba0d-e356baad7ae1/volumes"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.665500 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-config\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.665620 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-client-ca\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.665646 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fb12391-143f-44f4-93a4-503c539581bd-serving-cert\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.665696 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m42jx\" (UniqueName: \"kubernetes.io/projected/6fb12391-143f-44f4-93a4-503c539581bd-kube-api-access-m42jx\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.666927 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-client-ca\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.670029 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-config\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.676501 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fb12391-143f-44f4-93a4-503c539581bd-serving-cert\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.709509 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m42jx\" (UniqueName: \"kubernetes.io/projected/6fb12391-143f-44f4-93a4-503c539581bd-kube-api-access-m42jx\") pod \"route-controller-manager-7d68f9b447-ptlrc\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.769324 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.847964 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" event={"ID":"d9173bf0-5a37-423e-94e7-7496bd69f2ee","Type":"ContainerStarted","Data":"0a3370b3da01f40da79f4717b7cec1b307052ec393d94db366758841905ec6c0"}
Jan 20 19:51:34 crc kubenswrapper[4948]: I0120 19:51:34.848073 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86"
Jan 20 19:51:35 crc kubenswrapper[4948]: I0120 19:51:35.039634 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" podStartSLOduration=103.039613299 podStartE2EDuration="1m43.039613299s" podCreationTimestamp="2026-01-20 19:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:34.988497898 +0000 UTC m=+122.939222867" watchObservedRunningTime="2026-01-20 19:51:35.039613299 +0000 UTC m=+122.990338268"
Jan 20 19:51:35 crc kubenswrapper[4948]: I0120 19:51:35.052402 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827"]
Jan 20 19:51:35 crc kubenswrapper[4948]: W0120 19:51:35.478912 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0cb69a3_4b68_43d9_825d_e89d1b8fa8b5.slice/crio-a2e9499760240788315f5fedf9f9553350b782d83127493f26a9750f2434d185 WatchSource:0}: Error finding container a2e9499760240788315f5fedf9f9553350b782d83127493f26a9750f2434d185: Status 404 returned error can't find the container with id a2e9499760240788315f5fedf9f9553350b782d83127493f26a9750f2434d185
Jan 20 19:51:35 crc kubenswrapper[4948]: I0120 19:51:35.513034 4948 patch_prober.go:28] interesting pod/router-default-5444994796-mqlgr container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 20 19:51:35 crc kubenswrapper[4948]: [+]has-synced ok
Jan 20 19:51:35 crc kubenswrapper[4948]: [+]process-running ok
Jan 20 19:51:35 crc kubenswrapper[4948]: healthz check failed
Jan 20 19:51:35 crc kubenswrapper[4948]: I0120 19:51:35.513092 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-mqlgr" podUID="dcc77a74-fa21-4f82-af61-42c73086f4a8" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.017975 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" event={"ID":"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5","Type":"ContainerStarted","Data":"a2e9499760240788315f5fedf9f9553350b782d83127493f26a9750f2434d185"}
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.118503 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" event={"ID":"d9173bf0-5a37-423e-94e7-7496bd69f2ee","Type":"ContainerStarted","Data":"6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2"}
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.119888 4948 patch_prober.go:28] interesting pod/console-f9d7485db-lxvjj container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.18:8443/health\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body=
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.119939 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-lxvjj" podUID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.18:8443/health\": dial tcp 10.217.0.18:8443: connect: connection refused"
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.292172 4948 generic.go:334] "Generic (PLEG): container finished" podID="d6d7392e-b25f-4d82-91e0-a623842c5953" containerID="5193aced6a453e4d2fecd1e944e771489dba6db5bfdace8ca729d987172c4cf6" exitCode=0
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.292220 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"d6d7392e-b25f-4d82-91e0-a623842c5953","Type":"ContainerDied","Data":"5193aced6a453e4d2fecd1e944e771489dba6db5bfdace8ca729d987172c4cf6"}
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.452172 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.505780 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-mqlgr"
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.508144 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-mqlgr"
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.508362 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da9cc268-da04-4b8a-a9ff-217fa3377832-kubelet-dir\") pod \"da9cc268-da04-4b8a-a9ff-217fa3377832\" (UID: \"da9cc268-da04-4b8a-a9ff-217fa3377832\") "
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.508493 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da9cc268-da04-4b8a-a9ff-217fa3377832-kube-api-access\") pod \"da9cc268-da04-4b8a-a9ff-217fa3377832\" (UID: \"da9cc268-da04-4b8a-a9ff-217fa3377832\") "
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.508596 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/da9cc268-da04-4b8a-a9ff-217fa3377832-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "da9cc268-da04-4b8a-a9ff-217fa3377832" (UID: "da9cc268-da04-4b8a-a9ff-217fa3377832"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.508853 4948 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/da9cc268-da04-4b8a-a9ff-217fa3377832-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.532233 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da9cc268-da04-4b8a-a9ff-217fa3377832-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "da9cc268-da04-4b8a-a9ff-217fa3377832" (UID: "da9cc268-da04-4b8a-a9ff-217fa3377832"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.610686 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/da9cc268-da04-4b8a-a9ff-217fa3377832-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 20 19:51:36 crc kubenswrapper[4948]: I0120 19:51:36.777473 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"]
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.411586 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"da9cc268-da04-4b8a-a9ff-217fa3377832","Type":"ContainerDied","Data":"87539a81ab1616e8f512d3143eb74a3bbb2537699f6bee3e90a6af676aca1a10"}
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.411882 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87539a81ab1616e8f512d3143eb74a3bbb2537699f6bee3e90a6af676aca1a10"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.411967 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.445644 4948 generic.go:334] "Generic (PLEG): container finished" podID="0d4764a2-50ea-421c-9d14-13189740a541" containerID="fee25ea7a9b28716b72c16edbca7af14b564a44ee895168fea54cb0273c2a921" exitCode=0
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.445768 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" event={"ID":"0d4764a2-50ea-421c-9d14-13189740a541","Type":"ContainerDied","Data":"fee25ea7a9b28716b72c16edbca7af14b564a44ee895168fea54cb0273c2a921"}
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.476155 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" event={"ID":"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5","Type":"ContainerStarted","Data":"ce353bdbe0534364d302c134c9172525fcb75e3a0a2a4555979ccf5aaffd67a7"}
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.477101 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.500748 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc" event={"ID":"6fb12391-143f-44f4-93a4-503c539581bd","Type":"ContainerStarted","Data":"7a175b64efcbb523021023bf48dbbad05762b78570194692a6dce65360ab0541"}
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.513545 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.649486 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.649536 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.649576 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-9kr4w"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.649969 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"f87a7ddd8644cb5765ad5fa83520610a46f13f626758e69a781983fb72575155"} pod="openshift-console/downloads-7954f5f757-9kr4w" containerMessage="Container download-server failed liveness probe, will be restarted"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.650040 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" containerID="cri-o://f87a7ddd8644cb5765ad5fa83520610a46f13f626758e69a781983fb72575155" gracePeriod=2
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.650814 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.650840 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.651019 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.651041 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Jan 20 19:51:37 crc kubenswrapper[4948]: I0120 19:51:37.661347 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" podStartSLOduration=6.66132597 podStartE2EDuration="6.66132597s" podCreationTimestamp="2026-01-20 19:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:37.52953667 +0000 UTC m=+125.480261639" watchObservedRunningTime="2026-01-20 19:51:37.66132597 +0000 UTC m=+125.612050949"
Jan 20 19:51:38 crc kubenswrapper[4948]: I0120 19:51:38.830053 4948 generic.go:334] "Generic (PLEG): container finished" podID="516ee408-b349-44cd-9ba3-1a486e631818" containerID="f87a7ddd8644cb5765ad5fa83520610a46f13f626758e69a781983fb72575155" exitCode=0
Jan 20 19:51:38 crc kubenswrapper[4948]: I0120 19:51:38.830299 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-9kr4w" event={"ID":"516ee408-b349-44cd-9ba3-1a486e631818","Type":"ContainerDied","Data":"f87a7ddd8644cb5765ad5fa83520610a46f13f626758e69a781983fb72575155"}
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.005058 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc" event={"ID":"6fb12391-143f-44f4-93a4-503c539581bd","Type":"ContainerStarted","Data":"be7592ceef85f2d996ca26e60c39e3b83bd81e945ebe061d417b96bb064adea9"}
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.005097 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.022902 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.046297 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc" podStartSLOduration=10.046279282 podStartE2EDuration="10.046279282s" podCreationTimestamp="2026-01-20 19:51:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:51:39.042661122 +0000 UTC m=+126.993386111" watchObservedRunningTime="2026-01-20 19:51:39.046279282 +0000 UTC m=+126.997004251"
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.371761 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.454788 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d6d7392e-b25f-4d82-91e0-a623842c5953-kubelet-dir\") pod \"d6d7392e-b25f-4d82-91e0-a623842c5953\" (UID: \"d6d7392e-b25f-4d82-91e0-a623842c5953\") "
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.454939 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d6d7392e-b25f-4d82-91e0-a623842c5953-kube-api-access\") pod \"d6d7392e-b25f-4d82-91e0-a623842c5953\" (UID: \"d6d7392e-b25f-4d82-91e0-a623842c5953\") "
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.454930 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d6d7392e-b25f-4d82-91e0-a623842c5953-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d6d7392e-b25f-4d82-91e0-a623842c5953" (UID: "d6d7392e-b25f-4d82-91e0-a623842c5953"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.455379 4948 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d6d7392e-b25f-4d82-91e0-a623842c5953-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.600345 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6d7392e-b25f-4d82-91e0-a623842c5953-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d6d7392e-b25f-4d82-91e0-a623842c5953" (UID: "d6d7392e-b25f-4d82-91e0-a623842c5953"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:51:39 crc kubenswrapper[4948]: I0120 19:51:39.662279 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d6d7392e-b25f-4d82-91e0-a623842c5953-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.077657 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.093873 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.095326 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"d6d7392e-b25f-4d82-91e0-a623842c5953","Type":"ContainerDied","Data":"dbcbf253c7129e930521b473f8cd327d7000e5314b8ce7c20068538c0a5425d1"}
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.097215 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dbcbf253c7129e930521b473f8cd327d7000e5314b8ce7c20068538c0a5425d1"
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.115147 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf" event={"ID":"0d4764a2-50ea-421c-9d14-13189740a541","Type":"ContainerDied","Data":"0860553a13454c8059aed120e32aca0a9e2e366c76353f2a1641f2c3ae79c13b"}
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.115196 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0860553a13454c8059aed120e32aca0a9e2e366c76353f2a1641f2c3ae79c13b"
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.115265 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.158795 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-9kr4w" event={"ID":"516ee408-b349-44cd-9ba3-1a486e631818","Type":"ContainerStarted","Data":"da406485a1144dfc8da6d560b7e425375ec00e012f97f493baa896293690f690"}
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.159461 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-9kr4w"
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.159520 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body=
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.159544 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused"
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.205539 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6f4lh\" (UniqueName: \"kubernetes.io/projected/0d4764a2-50ea-421c-9d14-13189740a541-kube-api-access-6f4lh\") pod \"0d4764a2-50ea-421c-9d14-13189740a541\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") "
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.205613 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4764a2-50ea-421c-9d14-13189740a541-config-volume\") pod \"0d4764a2-50ea-421c-9d14-13189740a541\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") "
Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.205658 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4764a2-50ea-421c-9d14-13189740a541-secret-volume\") pod \"0d4764a2-50ea-421c-9d14-13189740a541\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") "
\"kubernetes.io/secret/0d4764a2-50ea-421c-9d14-13189740a541-secret-volume\") pod \"0d4764a2-50ea-421c-9d14-13189740a541\" (UID: \"0d4764a2-50ea-421c-9d14-13189740a541\") " Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.207313 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d4764a2-50ea-421c-9d14-13189740a541-config-volume" (OuterVolumeSpecName: "config-volume") pod "0d4764a2-50ea-421c-9d14-13189740a541" (UID: "0d4764a2-50ea-421c-9d14-13189740a541"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.308105 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0d4764a2-50ea-421c-9d14-13189740a541-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.462892 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d4764a2-50ea-421c-9d14-13189740a541-kube-api-access-6f4lh" (OuterVolumeSpecName: "kube-api-access-6f4lh") pod "0d4764a2-50ea-421c-9d14-13189740a541" (UID: "0d4764a2-50ea-421c-9d14-13189740a541"). InnerVolumeSpecName "kube-api-access-6f4lh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.465668 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d4764a2-50ea-421c-9d14-13189740a541-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0d4764a2-50ea-421c-9d14-13189740a541" (UID: "0d4764a2-50ea-421c-9d14-13189740a541"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.562757 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6f4lh\" (UniqueName: \"kubernetes.io/projected/0d4764a2-50ea-421c-9d14-13189740a541-kube-api-access-6f4lh\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:40 crc kubenswrapper[4948]: I0120 19:51:40.562817 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0d4764a2-50ea-421c-9d14-13189740a541-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:41 crc kubenswrapper[4948]: I0120 19:51:41.173846 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:41 crc kubenswrapper[4948]: I0120 19:51:41.174173 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:42 crc kubenswrapper[4948]: I0120 19:51:42.255869 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:42 crc kubenswrapper[4948]: I0120 19:51:42.255920 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" 
podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:45 crc kubenswrapper[4948]: I0120 19:51:45.007203 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827"] Jan 20 19:51:45 crc kubenswrapper[4948]: I0120 19:51:45.007769 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" podUID="c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" containerName="controller-manager" containerID="cri-o://ce353bdbe0534364d302c134c9172525fcb75e3a0a2a4555979ccf5aaffd67a7" gracePeriod=30 Jan 20 19:51:45 crc kubenswrapper[4948]: I0120 19:51:45.116115 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"] Jan 20 19:51:45 crc kubenswrapper[4948]: I0120 19:51:45.116348 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc" podUID="6fb12391-143f-44f4-93a4-503c539581bd" containerName="route-controller-manager" containerID="cri-o://be7592ceef85f2d996ca26e60c39e3b83bd81e945ebe061d417b96bb064adea9" gracePeriod=30 Jan 20 19:51:46 crc kubenswrapper[4948]: I0120 19:51:46.132170 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:46 crc kubenswrapper[4948]: I0120 19:51:46.142236 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 19:51:46 crc kubenswrapper[4948]: I0120 19:51:46.425616 4948 generic.go:334] "Generic (PLEG): container finished" podID="c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" containerID="ce353bdbe0534364d302c134c9172525fcb75e3a0a2a4555979ccf5aaffd67a7" exitCode=0 Jan 20 19:51:46 crc kubenswrapper[4948]: I0120 19:51:46.425732 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" event={"ID":"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5","Type":"ContainerDied","Data":"ce353bdbe0534364d302c134c9172525fcb75e3a0a2a4555979ccf5aaffd67a7"} Jan 20 19:51:46 crc kubenswrapper[4948]: I0120 19:51:46.436767 4948 generic.go:334] "Generic (PLEG): container finished" podID="6fb12391-143f-44f4-93a4-503c539581bd" containerID="be7592ceef85f2d996ca26e60c39e3b83bd81e945ebe061d417b96bb064adea9" exitCode=0 Jan 20 19:51:46 crc kubenswrapper[4948]: I0120 19:51:46.436873 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc" event={"ID":"6fb12391-143f-44f4-93a4-503c539581bd","Type":"ContainerDied","Data":"be7592ceef85f2d996ca26e60c39e3b83bd81e945ebe061d417b96bb064adea9"} Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.449518 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" event={"ID":"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5","Type":"ContainerDied","Data":"a2e9499760240788315f5fedf9f9553350b782d83127493f26a9750f2434d185"} Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.449776 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2e9499760240788315f5fedf9f9553350b782d83127493f26a9750f2434d185" Jan 20 19:51:47 crc 
kubenswrapper[4948]: I0120 19:51:47.512731 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.567157 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.571261 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.571313 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.571494 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.571523 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.603795 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-86d66fccd8-rmbmx"] Jan 20 19:51:47 crc kubenswrapper[4948]: E0120 19:51:47.604096 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" containerName="controller-manager" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604110 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" containerName="controller-manager" Jan 20 19:51:47 crc kubenswrapper[4948]: E0120 19:51:47.604122 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6d7392e-b25f-4d82-91e0-a623842c5953" containerName="pruner" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604128 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6d7392e-b25f-4d82-91e0-a623842c5953" containerName="pruner" Jan 20 19:51:47 crc kubenswrapper[4948]: E0120 19:51:47.604138 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da9cc268-da04-4b8a-a9ff-217fa3377832" containerName="pruner" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604145 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="da9cc268-da04-4b8a-a9ff-217fa3377832" containerName="pruner" Jan 20 19:51:47 crc kubenswrapper[4948]: E0120 19:51:47.604157 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d4764a2-50ea-421c-9d14-13189740a541" containerName="collect-profiles" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604162 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d4764a2-50ea-421c-9d14-13189740a541" 
containerName="collect-profiles" Jan 20 19:51:47 crc kubenswrapper[4948]: E0120 19:51:47.604174 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fb12391-143f-44f4-93a4-503c539581bd" containerName="route-controller-manager" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604180 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fb12391-143f-44f4-93a4-503c539581bd" containerName="route-controller-manager" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604290 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d4764a2-50ea-421c-9d14-13189740a541" containerName="collect-profiles" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604302 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fb12391-143f-44f4-93a4-503c539581bd" containerName="route-controller-manager" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604310 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6d7392e-b25f-4d82-91e0-a623842c5953" containerName="pruner" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604316 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="da9cc268-da04-4b8a-a9ff-217fa3377832" containerName="pruner" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604326 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" containerName="controller-manager" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.604773 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.611187 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-86d66fccd8-rmbmx"] Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.625058 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-config\") pod \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.625204 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-serving-cert\") pod \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.625240 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-client-ca\") pod \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.625287 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnsbw\" (UniqueName: \"kubernetes.io/projected/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-kube-api-access-hnsbw\") pod \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.625311 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-proxy-ca-bundles\") pod 
\"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\" (UID: \"c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.626414 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" (UID: "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.627038 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-config" (OuterVolumeSpecName: "config") pod "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" (UID: "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.631656 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-client-ca" (OuterVolumeSpecName: "client-ca") pod "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" (UID: "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.638107 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-kube-api-access-hnsbw" (OuterVolumeSpecName: "kube-api-access-hnsbw") pod "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" (UID: "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5"). InnerVolumeSpecName "kube-api-access-hnsbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.645975 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" (UID: "c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.726122 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-client-ca\") pod \"6fb12391-143f-44f4-93a4-503c539581bd\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.726541 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fb12391-143f-44f4-93a4-503c539581bd-serving-cert\") pod \"6fb12391-143f-44f4-93a4-503c539581bd\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.726577 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-config\") pod \"6fb12391-143f-44f4-93a4-503c539581bd\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.726658 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m42jx\" (UniqueName: \"kubernetes.io/projected/6fb12391-143f-44f4-93a4-503c539581bd-kube-api-access-m42jx\") pod \"6fb12391-143f-44f4-93a4-503c539581bd\" (UID: \"6fb12391-143f-44f4-93a4-503c539581bd\") " Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.727772 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f2de83-3044-4b23-943c-bcd26f659fb1-serving-cert\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.727921 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-client-ca\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.727991 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqkm7\" (UniqueName: \"kubernetes.io/projected/62f2de83-3044-4b23-943c-bcd26f659fb1-kube-api-access-mqkm7\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.728085 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-config\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.728177 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-proxy-ca-bundles\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: 
\"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.728305 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.728322 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.728334 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.728348 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnsbw\" (UniqueName: \"kubernetes.io/projected/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-kube-api-access-hnsbw\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.728360 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.729071 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-client-ca" (OuterVolumeSpecName: "client-ca") pod "6fb12391-143f-44f4-93a4-503c539581bd" (UID: "6fb12391-143f-44f4-93a4-503c539581bd"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.732344 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-config" (OuterVolumeSpecName: "config") pod "6fb12391-143f-44f4-93a4-503c539581bd" (UID: "6fb12391-143f-44f4-93a4-503c539581bd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.734675 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fb12391-143f-44f4-93a4-503c539581bd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6fb12391-143f-44f4-93a4-503c539581bd" (UID: "6fb12391-143f-44f4-93a4-503c539581bd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.735814 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fb12391-143f-44f4-93a4-503c539581bd-kube-api-access-m42jx" (OuterVolumeSpecName: "kube-api-access-m42jx") pod "6fb12391-143f-44f4-93a4-503c539581bd" (UID: "6fb12391-143f-44f4-93a4-503c539581bd"). InnerVolumeSpecName "kube-api-access-m42jx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895415 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-proxy-ca-bundles\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895608 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f2de83-3044-4b23-943c-bcd26f659fb1-serving-cert\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895666 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-client-ca\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895718 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqkm7\" (UniqueName: \"kubernetes.io/projected/62f2de83-3044-4b23-943c-bcd26f659fb1-kube-api-access-mqkm7\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895793 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-config\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895847 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895861 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fb12391-143f-44f4-93a4-503c539581bd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895873 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fb12391-143f-44f4-93a4-503c539581bd-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.895884 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m42jx\" (UniqueName: \"kubernetes.io/projected/6fb12391-143f-44f4-93a4-503c539581bd-kube-api-access-m42jx\") on node \"crc\" DevicePath \"\"" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.899513 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f2de83-3044-4b23-943c-bcd26f659fb1-serving-cert\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " 
pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.900576 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-config\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.900968 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-proxy-ca-bundles\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.902156 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-client-ca\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.916357 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqkm7\" (UniqueName: \"kubernetes.io/projected/62f2de83-3044-4b23-943c-bcd26f659fb1-kube-api-access-mqkm7\") pod \"controller-manager-86d66fccd8-rmbmx\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:47 crc kubenswrapper[4948]: I0120 19:51:47.964856 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:51:48 crc kubenswrapper[4948]: I0120 19:51:48.545270 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827" Jan 20 19:51:48 crc kubenswrapper[4948]: I0120 19:51:48.545284 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc" Jan 20 19:51:48 crc kubenswrapper[4948]: I0120 19:51:48.545305 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc" event={"ID":"6fb12391-143f-44f4-93a4-503c539581bd","Type":"ContainerDied","Data":"7a175b64efcbb523021023bf48dbbad05762b78570194692a6dce65360ab0541"} Jan 20 19:51:48 crc kubenswrapper[4948]: I0120 19:51:48.546241 4948 scope.go:117] "RemoveContainer" containerID="be7592ceef85f2d996ca26e60c39e3b83bd81e945ebe061d417b96bb064adea9" Jan 20 19:51:48 crc kubenswrapper[4948]: I0120 19:51:48.595844 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827"] Jan 20 19:51:48 crc kubenswrapper[4948]: I0120 19:51:48.595881 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6ddcd9b6f7-vw827"] Jan 20 19:51:48 crc kubenswrapper[4948]: I0120 19:51:48.599509 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"] Jan 20 19:51:48 crc kubenswrapper[4948]: I0120 19:51:48.604171 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d68f9b447-ptlrc"] Jan 20 19:51:49 crc kubenswrapper[4948]: I0120 19:51:49.979692 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t"] Jan 20 19:51:49 crc kubenswrapper[4948]: I0120 19:51:49.984431 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:49 crc kubenswrapper[4948]: I0120 19:51:49.992427 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 20 19:51:49 crc kubenswrapper[4948]: I0120 19:51:49.993592 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 20 19:51:49 crc kubenswrapper[4948]: I0120 19:51:49.997482 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 20 19:51:49 crc kubenswrapper[4948]: I0120 19:51:49.998853 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t"] Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.032680 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.034335 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.035816 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.079837 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-client-ca\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " 
pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.079905 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-config\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.080001 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d95cc352-8fc3-423f-b035-512e1d0973a0-serving-cert\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.080021 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g765p\" (UniqueName: \"kubernetes.io/projected/d95cc352-8fc3-423f-b035-512e1d0973a0-kube-api-access-g765p\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.172392 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-p46fx" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.187211 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-client-ca\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.187269 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-config\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.187558 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d95cc352-8fc3-423f-b035-512e1d0973a0-serving-cert\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.187817 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g765p\" (UniqueName: \"kubernetes.io/projected/d95cc352-8fc3-423f-b035-512e1d0973a0-kube-api-access-g765p\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.190494 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-config\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.191595 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-client-ca\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.201487 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d95cc352-8fc3-423f-b035-512e1d0973a0-serving-cert\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.295275 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g765p\" (UniqueName: \"kubernetes.io/projected/d95cc352-8fc3-423f-b035-512e1d0973a0-kube-api-access-g765p\") pod \"route-controller-manager-77bfd6bcc7-rgk7t\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.361500 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.494745 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-86d66fccd8-rmbmx"] Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.584808 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fb12391-143f-44f4-93a4-503c539581bd" path="/var/lib/kubelet/pods/6fb12391-143f-44f4-93a4-503c539581bd/volumes" Jan 20 19:51:50 crc kubenswrapper[4948]: I0120 19:51:50.587046 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5" path="/var/lib/kubelet/pods/c0cb69a3-4b68-43d9-825d-e89d1b8fa8b5/volumes" Jan 20 19:51:51 crc kubenswrapper[4948]: I0120 19:51:51.595063 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:51:57 crc kubenswrapper[4948]: I0120 19:51:57.568613 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:57 crc kubenswrapper[4948]: I0120 19:51:57.568933 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:57 crc kubenswrapper[4948]: I0120 19:51:57.576826 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server 
namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:51:57 crc kubenswrapper[4948]: I0120 19:51:57.576893 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.859174 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.859220 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.859268 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.859332 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.862006 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.862187 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.862386 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.870899 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.872960 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.881230 4948 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.885580 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.901160 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.916375 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:51:58 crc kubenswrapper[4948]: I0120 19:51:58.942885 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 20 19:51:59 crc kubenswrapper[4948]: I0120 19:51:59.067784 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 20 19:52:02 crc kubenswrapper[4948]: I0120 19:52:02.300466 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" event={"ID":"62f2de83-3044-4b23-943c-bcd26f659fb1","Type":"ContainerStarted","Data":"5fce88fcaaabc12b4c52e003805659ac7f0c4b1716991a0c538e14ed98d260a1"} Jan 20 19:52:02 crc kubenswrapper[4948]: I0120 19:52:02.765666 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t"] Jan 20 19:52:04 crc kubenswrapper[4948]: I0120 19:52:04.894977 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-86d66fccd8-rmbmx"] Jan 20 19:52:04 crc kubenswrapper[4948]: I0120 19:52:04.904433 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t"] Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.098386 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.099978 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.103494 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.106557 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.115467 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.126664 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e4a2cbe-b256-4833-865f-dea42e49f241-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1e4a2cbe-b256-4833-865f-dea42e49f241\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.126733 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e4a2cbe-b256-4833-865f-dea42e49f241-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1e4a2cbe-b256-4833-865f-dea42e49f241\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.228360 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e4a2cbe-b256-4833-865f-dea42e49f241-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1e4a2cbe-b256-4833-865f-dea42e49f241\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.228438 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e4a2cbe-b256-4833-865f-dea42e49f241-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1e4a2cbe-b256-4833-865f-dea42e49f241\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.228530 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e4a2cbe-b256-4833-865f-dea42e49f241-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1e4a2cbe-b256-4833-865f-dea42e49f241\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.264505 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e4a2cbe-b256-4833-865f-dea42e49f241-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1e4a2cbe-b256-4833-865f-dea42e49f241\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.429741 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.567930 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.568030 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.568107 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-9kr4w" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.569183 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"da406485a1144dfc8da6d560b7e425375ec00e012f97f493baa896293690f690"} pod="openshift-console/downloads-7954f5f757-9kr4w" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.569221 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.569283 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.569234 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" containerID="cri-o://da406485a1144dfc8da6d560b7e425375ec00e012f97f493baa896293690f690" gracePeriod=2 Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.570194 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:52:07 crc kubenswrapper[4948]: I0120 19:52:07.570259 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:52:08 crc kubenswrapper[4948]: I0120 19:52:08.356649 4948 generic.go:334] "Generic (PLEG): container finished" podID="516ee408-b349-44cd-9ba3-1a486e631818" containerID="da406485a1144dfc8da6d560b7e425375ec00e012f97f493baa896293690f690" exitCode=0 Jan 20 19:52:08 crc kubenswrapper[4948]: I0120 19:52:08.356755 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-console/downloads-7954f5f757-9kr4w" event={"ID":"516ee408-b349-44cd-9ba3-1a486e631818","Type":"ContainerDied","Data":"da406485a1144dfc8da6d560b7e425375ec00e012f97f493baa896293690f690"} Jan 20 19:52:08 crc kubenswrapper[4948]: I0120 19:52:08.357061 4948 scope.go:117] "RemoveContainer" containerID="f87a7ddd8644cb5765ad5fa83520610a46f13f626758e69a781983fb72575155" Jan 20 19:52:11 crc kubenswrapper[4948]: I0120 19:52:11.377388 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" event={"ID":"d95cc352-8fc3-423f-b035-512e1d0973a0","Type":"ContainerStarted","Data":"0ac9af2bbc288b2882ce569fb32216ee79bcf5ee88a76203b89672fbbfbab2c3"} Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.306887 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.308304 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.326584 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.406029 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kube-api-access\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.406083 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-var-lock\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.406136 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kubelet-dir\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.507676 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-var-lock\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.507878 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kubelet-dir\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.507984 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kube-api-access\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 
19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.508622 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-var-lock\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.508740 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kubelet-dir\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.723437 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kube-api-access\") pod \"installer-9-crc\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:12 crc kubenswrapper[4948]: I0120 19:52:12.988543 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:52:17 crc kubenswrapper[4948]: I0120 19:52:17.582413 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:52:17 crc kubenswrapper[4948]: I0120 19:52:17.583345 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:52:20 crc kubenswrapper[4948]: I0120 19:52:20.420374 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:52:20 crc kubenswrapper[4948]: I0120 19:52:20.421135 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:52:27 crc kubenswrapper[4948]: E0120 19:52:27.000091 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 20 19:52:27 crc kubenswrapper[4948]: E0120 19:52:27.000892 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mk4wx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-m7lf9_openshift-marketplace(a443e18f-462b-4c81-9f70-3bae303f278f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 19:52:27 crc kubenswrapper[4948]: E0120 19:52:27.002094 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-m7lf9" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" Jan 20 19:52:27 crc kubenswrapper[4948]: I0120 19:52:27.567504 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:52:27 crc kubenswrapper[4948]: I0120 19:52:27.567575 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:52:33 crc kubenswrapper[4948]: E0120 19:52:33.611556 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 20 19:52:33 crc kubenswrapper[4948]: E0120 19:52:33.612057 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lvx6q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-flwsw_openshift-marketplace(b73db843-a550-4d8e-8aa1-0d6ce047cefe): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 19:52:33 crc kubenswrapper[4948]: E0120 19:52:33.614002 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-flwsw" podUID="b73db843-a550-4d8e-8aa1-0d6ce047cefe" Jan 20 19:52:33 crc kubenswrapper[4948]: E0120 19:52:33.936821 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 20 19:52:33 crc kubenswrapper[4948]: E0120 19:52:33.937631 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8v87x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-fpw4g_openshift-marketplace(0235a2ef-a094-4747-8aa5-581cb5f665a2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 19:52:33 crc kubenswrapper[4948]: E0120 19:52:33.939248 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-fpw4g" podUID="0235a2ef-a094-4747-8aa5-581cb5f665a2" Jan 20 19:52:37 crc kubenswrapper[4948]: I0120 19:52:37.567329 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:52:37 crc kubenswrapper[4948]: I0120 19:52:37.567727 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.024798 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.025225 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gtfgl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-bslf8_openshift-marketplace(31d44844-4319-4456-b6cc-88135734f548): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.026846 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-bslf8" podUID="31d44844-4319-4456-b6cc-88135734f548" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.048779 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.048949 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h4k45,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-4l26k_openshift-marketplace(4e87b4cc-edb1-4541-aff1-83012069d55c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.050335 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-4l26k" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.055595 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.055954 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hvz5r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-2hcgj_openshift-marketplace(aa1c9624-c789-4df8-8c32-eb95e7c40690): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.057042 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-2hcgj" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.156837 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-fpw4g" podUID="0235a2ef-a094-4747-8aa5-581cb5f665a2" Jan 20 19:52:38 crc kubenswrapper[4948]: E0120 19:52:38.156854 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-flwsw" podUID="b73db843-a550-4d8e-8aa1-0d6ce047cefe" Jan 20 19:52:38 crc kubenswrapper[4948]: I0120 19:52:38.526385 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"3612065f06d2e77ecd489de185fcabb910119ae8c3ffa592b881683a44b53e4c"} Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.200797 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-4l26k" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.201151 
4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-2hcgj" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.201866 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-bslf8" podUID="31d44844-4319-4456-b6cc-88135734f548" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.292129 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.292343 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6bc6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-rlfcl_openshift-marketplace(4c19381d-95b1-4813-8625-da98f07c486f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.293923 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-rlfcl" podUID="4c19381d-95b1-4813-8625-da98f07c486f" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.342055 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: 
context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.342530 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l8v99,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-lzft6_openshift-marketplace(2dc4a3ea-7198-4d3c-a592-7734d229d481): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.343625 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-lzft6" podUID="2dc4a3ea-7198-4d3c-a592-7734d229d481" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.539760 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"d96e96e6402a9f6a2164c77f6a121ff586ea61daa6f7155cbeb118e4deb71d96"} Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.540788 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"8f47cdabb702507e8d20c1a709ae5e114cbf5b92c48f56d9a1693cc5464bc548"} Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.543086 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-9kr4w" event={"ID":"516ee408-b349-44cd-9ba3-1a486e631818","Type":"ContainerStarted","Data":"547590a2db6978916fe26bcb9609b9f8c55141fd191d9e35ed3addbfd217b66f"} Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.643873 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-lzft6" podUID="2dc4a3ea-7198-4d3c-a592-7734d229d481" Jan 20 19:52:40 crc kubenswrapper[4948]: E0120 19:52:40.643879 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-rlfcl" podUID="4c19381d-95b1-4813-8625-da98f07c486f" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.720498 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 20 19:52:40 crc kubenswrapper[4948]: W0120 19:52:40.755491 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod1e4a2cbe_b256_4833_865f_dea42e49f241.slice/crio-fb3b39e5e27de5dde4b17a8925ac4cfe618c129ff5eb346195d10dfde2d36c1d WatchSource:0}: Error finding container fb3b39e5e27de5dde4b17a8925ac4cfe618c129ff5eb346195d10dfde2d36c1d: Status 404 returned error can't find the container with id fb3b39e5e27de5dde4b17a8925ac4cfe618c129ff5eb346195d10dfde2d36c1d Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.816469 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.870839 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qjm22"] Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.871529 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.902848 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qjm22"] Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.943538 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.943625 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-registry-tls\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.943649 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.944390 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt9kq\" (UniqueName: \"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-kube-api-access-jt9kq\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.944426 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.944448 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-registry-certificates\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.944469 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-trusted-ca\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:40 crc kubenswrapper[4948]: I0120 19:52:40.944489 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-bound-sa-token\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.005948 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.046403 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-registry-certificates\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.046456 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-trusted-ca\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.046474 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-bound-sa-token\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.046524 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-registry-tls\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.046545 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.046596 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt9kq\" (UniqueName: \"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-kube-api-access-jt9kq\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.046612 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.047027 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-ca-trust-extracted\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.047595 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-trusted-ca\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.047670 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-registry-certificates\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.055583 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-registry-tls\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.056189 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-installation-pull-secrets\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.081308 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-bound-sa-token\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.090459 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt9kq\" (UniqueName: \"kubernetes.io/projected/c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02-kube-api-access-jt9kq\") pod \"image-registry-66df7c8f76-qjm22\" (UID: \"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02\") " pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.187635 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.844337 4948 generic.go:334] "Generic (PLEG): container finished" podID="a443e18f-462b-4c81-9f70-3bae303f278f" containerID="321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927" exitCode=0 Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.844421 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m7lf9" event={"ID":"a443e18f-462b-4c81-9f70-3bae303f278f","Type":"ContainerDied","Data":"321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.855899 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" event={"ID":"62f2de83-3044-4b23-943c-bcd26f659fb1","Type":"ContainerStarted","Data":"d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.856082 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" podUID="62f2de83-3044-4b23-943c-bcd26f659fb1" containerName="controller-manager" containerID="cri-o://d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7" gracePeriod=30 Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.856240 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.861965 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.874024 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"4b98431468e7589c5418fc86470b71b0b6c77ab1e88782b73c1e422262bdad7f"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.874779 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.885418 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"1e4a2cbe-b256-4833-865f-dea42e49f241","Type":"ContainerStarted","Data":"54c7becdc1f33b3f4d9279c827864464aea20a789a98af30376c2daf526d48cc"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.885455 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"1e4a2cbe-b256-4833-865f-dea42e49f241","Type":"ContainerStarted","Data":"fb3b39e5e27de5dde4b17a8925ac4cfe618c129ff5eb346195d10dfde2d36c1d"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.896030 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"70e7e8a62e799856078b78b007cce7ccfd5e0cb22a75bfcdc8a40c8ead668b62"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.907598 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" 
podUID="d95cc352-8fc3-423f-b035-512e1d0973a0" containerName="route-controller-manager" containerID="cri-o://23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade" gracePeriod=30 Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.907823 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" event={"ID":"d95cc352-8fc3-423f-b035-512e1d0973a0","Type":"ContainerStarted","Data":"23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.908063 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.916100 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"5bce8cba-e89c-4a8a-b261-ad8bae824ec9","Type":"ContainerStarted","Data":"bfffe0c60794c310b4c2fa84da3d2fdb0f4c958e2183fe5c6035ae2d8437e424"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.916256 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"5bce8cba-e89c-4a8a-b261-ad8bae824ec9","Type":"ContainerStarted","Data":"12bd6f07ade0778d2aaa3876890f276cdb6f900419937f6dc4559097e1acd045"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.926773 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"dc79adda67f6f84494ed600105cb4e34aa04d07f8b7fa0428772774526880128"} Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.927185 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-9kr4w" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.927240 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.927271 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.943273 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.948666 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" podStartSLOduration=56.948648579 podStartE2EDuration="56.948648579s" podCreationTimestamp="2026-01-20 19:51:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:41.907261879 +0000 UTC m=+189.857986848" watchObservedRunningTime="2026-01-20 19:52:41.948648579 +0000 UTC m=+189.899373548" Jan 20 19:52:41 crc kubenswrapper[4948]: I0120 19:52:41.997861 4948 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=34.99783103 podStartE2EDuration="34.99783103s" podCreationTimestamp="2026-01-20 19:52:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:41.997073297 +0000 UTC m=+189.947798266" watchObservedRunningTime="2026-01-20 19:52:41.99783103 +0000 UTC m=+189.948555999" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.061906 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" podStartSLOduration=57.061887258 podStartE2EDuration="57.061887258s" podCreationTimestamp="2026-01-20 19:51:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:42.060625171 +0000 UTC m=+190.011350140" watchObservedRunningTime="2026-01-20 19:52:42.061887258 +0000 UTC m=+190.012612227" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.098052 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=30.098034754 podStartE2EDuration="30.098034754s" podCreationTimestamp="2026-01-20 19:52:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:42.089162633 +0000 UTC m=+190.039887602" watchObservedRunningTime="2026-01-20 19:52:42.098034754 +0000 UTC m=+190.048759723" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.144891 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-qjm22"] Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.882217 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.908494 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.935346 4948 generic.go:334] "Generic (PLEG): container finished" podID="d95cc352-8fc3-423f-b035-512e1d0973a0" containerID="23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade" exitCode=0 Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.935595 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" event={"ID":"d95cc352-8fc3-423f-b035-512e1d0973a0","Type":"ContainerDied","Data":"23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade"} Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.935678 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" event={"ID":"d95cc352-8fc3-423f-b035-512e1d0973a0","Type":"ContainerDied","Data":"0ac9af2bbc288b2882ce569fb32216ee79bcf5ee88a76203b89672fbbfbab2c3"} Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.935780 4948 scope.go:117] "RemoveContainer" containerID="23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.935987 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.946259 4948 generic.go:334] "Generic (PLEG): container finished" podID="62f2de83-3044-4b23-943c-bcd26f659fb1" containerID="d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7" exitCode=0 Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.946433 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" event={"ID":"62f2de83-3044-4b23-943c-bcd26f659fb1","Type":"ContainerDied","Data":"d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7"} Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.946549 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" event={"ID":"62f2de83-3044-4b23-943c-bcd26f659fb1","Type":"ContainerDied","Data":"5fce88fcaaabc12b4c52e003805659ac7f0c4b1716991a0c538e14ed98d260a1"} Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.946669 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86d66fccd8-rmbmx" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.953675 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-67c6b94b8c-zzm96"] Jan 20 19:52:42 crc kubenswrapper[4948]: E0120 19:52:42.954308 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f2de83-3044-4b23-943c-bcd26f659fb1" containerName="controller-manager" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.954333 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f2de83-3044-4b23-943c-bcd26f659fb1" containerName="controller-manager" Jan 20 19:52:42 crc kubenswrapper[4948]: E0120 19:52:42.954353 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d95cc352-8fc3-423f-b035-512e1d0973a0" containerName="route-controller-manager" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.954363 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d95cc352-8fc3-423f-b035-512e1d0973a0" containerName="route-controller-manager" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.954476 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d95cc352-8fc3-423f-b035-512e1d0973a0" containerName="route-controller-manager" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.954497 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="62f2de83-3044-4b23-943c-bcd26f659fb1" containerName="controller-manager" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.955021 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.957891 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" event={"ID":"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02","Type":"ContainerStarted","Data":"41d5a52d47aaa0a58d0a8835b7a023b8d097c9eca9f3429af4c72d34e3e5260e"} Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.957924 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" event={"ID":"c6ecadf6-64ae-4ea9-9e3c-1f8d42ebfa02","Type":"ContainerStarted","Data":"325cb560bbe12ece5344794f63fe61d771e56dbd0e38cc5052c278e2eec3f66a"} Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.957938 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.958546 4948 patch_prober.go:28] interesting pod/downloads-7954f5f757-9kr4w container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" start-of-body= Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.958839 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67c6b94b8c-zzm96"] Jan 20 19:52:42 crc kubenswrapper[4948]: I0120 19:52:42.959048 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-9kr4w" podUID="516ee408-b349-44cd-9ba3-1a486e631818" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.11:8080/\": dial tcp 10.217.0.11:8080: connect: connection refused" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.005585 4948 scope.go:117] "RemoveContainer" containerID="23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade" Jan 20 19:52:43 crc kubenswrapper[4948]: E0120 19:52:43.006933 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade\": container with ID starting with 23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade not found: ID does not exist" containerID="23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.006973 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade"} err="failed to get container status \"23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade\": rpc error: code = NotFound desc = could not find container \"23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade\": container with ID starting with 23f4a8078ead195dd7ea726b973fc52e90aae21b7802c787ae712ef1570e6ade not found: ID does not exist" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.007022 4948 scope.go:117] "RemoveContainer" containerID="d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.026007 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-qjm22" podStartSLOduration=3.025987566 podStartE2EDuration="3.025987566s" 
podCreationTimestamp="2026-01-20 19:52:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:43.021267637 +0000 UTC m=+190.971992606" watchObservedRunningTime="2026-01-20 19:52:43.025987566 +0000 UTC m=+190.976712535" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.053801 4948 scope.go:117] "RemoveContainer" containerID="d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7" Jan 20 19:52:43 crc kubenswrapper[4948]: E0120 19:52:43.054355 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7\": container with ID starting with d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7 not found: ID does not exist" containerID="d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.054382 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7"} err="failed to get container status \"d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7\": rpc error: code = NotFound desc = could not find container \"d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7\": container with ID starting with d9a7c6f995acb0eae1ef4a50f9d80d2eac2c8dab5bf2f9863bdead8d1e6293a7 not found: ID does not exist" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057552 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-client-ca\") pod \"62f2de83-3044-4b23-943c-bcd26f659fb1\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057590 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f2de83-3044-4b23-943c-bcd26f659fb1-serving-cert\") pod \"62f2de83-3044-4b23-943c-bcd26f659fb1\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057636 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d95cc352-8fc3-423f-b035-512e1d0973a0-serving-cert\") pod \"d95cc352-8fc3-423f-b035-512e1d0973a0\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057657 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g765p\" (UniqueName: \"kubernetes.io/projected/d95cc352-8fc3-423f-b035-512e1d0973a0-kube-api-access-g765p\") pod \"d95cc352-8fc3-423f-b035-512e1d0973a0\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057675 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-client-ca\") pod \"d95cc352-8fc3-423f-b035-512e1d0973a0\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057692 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-proxy-ca-bundles\") pod \"62f2de83-3044-4b23-943c-bcd26f659fb1\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057803 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-config\") pod \"d95cc352-8fc3-423f-b035-512e1d0973a0\" (UID: \"d95cc352-8fc3-423f-b035-512e1d0973a0\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057823 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-config\") pod \"62f2de83-3044-4b23-943c-bcd26f659fb1\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.057845 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqkm7\" (UniqueName: \"kubernetes.io/projected/62f2de83-3044-4b23-943c-bcd26f659fb1-kube-api-access-mqkm7\") pod \"62f2de83-3044-4b23-943c-bcd26f659fb1\" (UID: \"62f2de83-3044-4b23-943c-bcd26f659fb1\") " Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.058011 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-proxy-ca-bundles\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.058086 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-client-ca\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.058116 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/663b6891-d5d0-4146-a751-3ef27b687254-serving-cert\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.058139 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx5wl\" (UniqueName: \"kubernetes.io/projected/663b6891-d5d0-4146-a751-3ef27b687254-kube-api-access-lx5wl\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.058187 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-config\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.058797 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-client-ca" (OuterVolumeSpecName: "client-ca") pod "62f2de83-3044-4b23-943c-bcd26f659fb1" (UID: "62f2de83-3044-4b23-943c-bcd26f659fb1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.059439 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-config" (OuterVolumeSpecName: "config") pod "d95cc352-8fc3-423f-b035-512e1d0973a0" (UID: "d95cc352-8fc3-423f-b035-512e1d0973a0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.059052 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "62f2de83-3044-4b23-943c-bcd26f659fb1" (UID: "62f2de83-3044-4b23-943c-bcd26f659fb1"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.060078 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-config" (OuterVolumeSpecName: "config") pod "62f2de83-3044-4b23-943c-bcd26f659fb1" (UID: "62f2de83-3044-4b23-943c-bcd26f659fb1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.061430 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-client-ca" (OuterVolumeSpecName: "client-ca") pod "d95cc352-8fc3-423f-b035-512e1d0973a0" (UID: "d95cc352-8fc3-423f-b035-512e1d0973a0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.064973 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d95cc352-8fc3-423f-b035-512e1d0973a0-kube-api-access-g765p" (OuterVolumeSpecName: "kube-api-access-g765p") pod "d95cc352-8fc3-423f-b035-512e1d0973a0" (UID: "d95cc352-8fc3-423f-b035-512e1d0973a0"). InnerVolumeSpecName "kube-api-access-g765p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.065307 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62f2de83-3044-4b23-943c-bcd26f659fb1-kube-api-access-mqkm7" (OuterVolumeSpecName: "kube-api-access-mqkm7") pod "62f2de83-3044-4b23-943c-bcd26f659fb1" (UID: "62f2de83-3044-4b23-943c-bcd26f659fb1"). InnerVolumeSpecName "kube-api-access-mqkm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.065417 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62f2de83-3044-4b23-943c-bcd26f659fb1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "62f2de83-3044-4b23-943c-bcd26f659fb1" (UID: "62f2de83-3044-4b23-943c-bcd26f659fb1"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.066811 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d95cc352-8fc3-423f-b035-512e1d0973a0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d95cc352-8fc3-423f-b035-512e1d0973a0" (UID: "d95cc352-8fc3-423f-b035-512e1d0973a0"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159520 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-client-ca\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159585 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/663b6891-d5d0-4146-a751-3ef27b687254-serving-cert\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159625 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx5wl\" (UniqueName: \"kubernetes.io/projected/663b6891-d5d0-4146-a751-3ef27b687254-kube-api-access-lx5wl\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159697 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-config\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159781 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-proxy-ca-bundles\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159872 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqkm7\" (UniqueName: \"kubernetes.io/projected/62f2de83-3044-4b23-943c-bcd26f659fb1-kube-api-access-mqkm7\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159890 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159903 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62f2de83-3044-4b23-943c-bcd26f659fb1-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159915 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/d95cc352-8fc3-423f-b035-512e1d0973a0-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159926 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159940 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g765p\" (UniqueName: \"kubernetes.io/projected/d95cc352-8fc3-423f-b035-512e1d0973a0-kube-api-access-g765p\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159953 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159964 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d95cc352-8fc3-423f-b035-512e1d0973a0-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.159978 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62f2de83-3044-4b23-943c-bcd26f659fb1-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.162170 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-proxy-ca-bundles\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.162393 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-client-ca\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.162569 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-config\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.166777 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/663b6891-d5d0-4146-a751-3ef27b687254-serving-cert\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.178008 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx5wl\" (UniqueName: \"kubernetes.io/projected/663b6891-d5d0-4146-a751-3ef27b687254-kube-api-access-lx5wl\") pod \"controller-manager-67c6b94b8c-zzm96\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.288967 4948 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t"] Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.292991 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-77bfd6bcc7-rgk7t"] Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.308059 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-86d66fccd8-rmbmx"] Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.311683 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-86d66fccd8-rmbmx"] Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.317525 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.783874 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-67c6b94b8c-zzm96"] Jan 20 19:52:43 crc kubenswrapper[4948]: W0120 19:52:43.786515 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod663b6891_d5d0_4146_a751_3ef27b687254.slice/crio-0c30595ff3138284e7845415b81c751eda688a74fb28be42f63a941f42a5a094 WatchSource:0}: Error finding container 0c30595ff3138284e7845415b81c751eda688a74fb28be42f63a941f42a5a094: Status 404 returned error can't find the container with id 0c30595ff3138284e7845415b81c751eda688a74fb28be42f63a941f42a5a094 Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.963538 4948 generic.go:334] "Generic (PLEG): container finished" podID="1e4a2cbe-b256-4833-865f-dea42e49f241" containerID="54c7becdc1f33b3f4d9279c827864464aea20a789a98af30376c2daf526d48cc" exitCode=0 Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.963827 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"1e4a2cbe-b256-4833-865f-dea42e49f241","Type":"ContainerDied","Data":"54c7becdc1f33b3f4d9279c827864464aea20a789a98af30376c2daf526d48cc"} Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.966917 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" event={"ID":"663b6891-d5d0-4146-a751-3ef27b687254","Type":"ContainerStarted","Data":"0c30595ff3138284e7845415b81c751eda688a74fb28be42f63a941f42a5a094"} Jan 20 19:52:43 crc kubenswrapper[4948]: I0120 19:52:43.970153 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m7lf9" event={"ID":"a443e18f-462b-4c81-9f70-3bae303f278f","Type":"ContainerStarted","Data":"303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54"} Jan 20 19:52:44 crc kubenswrapper[4948]: I0120 19:52:44.056390 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-m7lf9" podStartSLOduration=8.354719868 podStartE2EDuration="1m21.056372107s" podCreationTimestamp="2026-01-20 19:51:23 +0000 UTC" firstStartedPulling="2026-01-20 19:51:30.190336356 +0000 UTC m=+118.141061325" lastFinishedPulling="2026-01-20 19:52:42.891988595 +0000 UTC m=+190.842713564" observedRunningTime="2026-01-20 19:52:44.052236715 +0000 UTC m=+192.002961694" watchObservedRunningTime="2026-01-20 19:52:44.056372107 +0000 UTC m=+192.007097076" Jan 20 19:52:44 crc 
kubenswrapper[4948]: I0120 19:52:44.784533 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62f2de83-3044-4b23-943c-bcd26f659fb1" path="/var/lib/kubelet/pods/62f2de83-3044-4b23-943c-bcd26f659fb1/volumes" Jan 20 19:52:44 crc kubenswrapper[4948]: I0120 19:52:44.785748 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d95cc352-8fc3-423f-b035-512e1d0973a0" path="/var/lib/kubelet/pods/d95cc352-8fc3-423f-b035-512e1d0973a0/volumes" Jan 20 19:52:44 crc kubenswrapper[4948]: I0120 19:52:44.967256 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-67c6b94b8c-zzm96"] Jan 20 19:52:44 crc kubenswrapper[4948]: I0120 19:52:44.979317 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" event={"ID":"663b6891-d5d0-4146-a751-3ef27b687254","Type":"ContainerStarted","Data":"48e487237171b12dc47b4f673b8367318e5dd900fc8c35a1348ffa3fa74cccb1"} Jan 20 19:52:44 crc kubenswrapper[4948]: I0120 19:52:44.980822 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.043687 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.111273 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" podStartSLOduration=41.1112539 podStartE2EDuration="41.1112539s" podCreationTimestamp="2026-01-20 19:52:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:45.042459872 +0000 UTC m=+192.993184841" watchObservedRunningTime="2026-01-20 19:52:45.1112539 +0000 UTC m=+193.061978869" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.181061 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl"] Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.181900 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.185051 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.185140 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.185465 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.185635 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.189964 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.190192 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.242830 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl"] Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.379328 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-config\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.379411 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c36b505-5b12-409d-a6cc-63c7ab827fec-serving-cert\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.379443 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-client-ca\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.379513 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4qzd\" (UniqueName: \"kubernetes.io/projected/7c36b505-5b12-409d-a6cc-63c7ab827fec-kube-api-access-g4qzd\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.468670 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.480724 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-config\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.480801 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c36b505-5b12-409d-a6cc-63c7ab827fec-serving-cert\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.480830 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-client-ca\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.480877 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4qzd\" (UniqueName: \"kubernetes.io/projected/7c36b505-5b12-409d-a6cc-63c7ab827fec-kube-api-access-g4qzd\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.483247 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-config\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.484601 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-client-ca\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.493547 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c36b505-5b12-409d-a6cc-63c7ab827fec-serving-cert\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.502940 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4qzd\" (UniqueName: \"kubernetes.io/projected/7c36b505-5b12-409d-a6cc-63c7ab827fec-kube-api-access-g4qzd\") pod \"route-controller-manager-5f65fb8948-hlfhl\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") " pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc 
kubenswrapper[4948]: I0120 19:52:45.508162 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.594055 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e4a2cbe-b256-4833-865f-dea42e49f241-kube-api-access\") pod \"1e4a2cbe-b256-4833-865f-dea42e49f241\" (UID: \"1e4a2cbe-b256-4833-865f-dea42e49f241\") " Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.594173 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e4a2cbe-b256-4833-865f-dea42e49f241-kubelet-dir\") pod \"1e4a2cbe-b256-4833-865f-dea42e49f241\" (UID: \"1e4a2cbe-b256-4833-865f-dea42e49f241\") " Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.594643 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1e4a2cbe-b256-4833-865f-dea42e49f241-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "1e4a2cbe-b256-4833-865f-dea42e49f241" (UID: "1e4a2cbe-b256-4833-865f-dea42e49f241"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.614226 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e4a2cbe-b256-4833-865f-dea42e49f241-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1e4a2cbe-b256-4833-865f-dea42e49f241" (UID: "1e4a2cbe-b256-4833-865f-dea42e49f241"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.700501 4948 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e4a2cbe-b256-4833-865f-dea42e49f241-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.700531 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e4a2cbe-b256-4833-865f-dea42e49f241-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.879689 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl"] Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.987309 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" event={"ID":"7c36b505-5b12-409d-a6cc-63c7ab827fec","Type":"ContainerStarted","Data":"7b1d36fbf562b1ba797c43a4fa9814b3870cee3566e660914a180a0fe4d09e4a"} Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.988668 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"1e4a2cbe-b256-4833-865f-dea42e49f241","Type":"ContainerDied","Data":"fb3b39e5e27de5dde4b17a8925ac4cfe618c129ff5eb346195d10dfde2d36c1d"} Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.988695 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb3b39e5e27de5dde4b17a8925ac4cfe618c129ff5eb346195d10dfde2d36c1d" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.988759 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 20 19:52:45 crc kubenswrapper[4948]: I0120 19:52:45.988837 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" podUID="663b6891-d5d0-4146-a751-3ef27b687254" containerName="controller-manager" containerID="cri-o://48e487237171b12dc47b4f673b8367318e5dd900fc8c35a1348ffa3fa74cccb1" gracePeriod=30 Jan 20 19:52:47 crc kubenswrapper[4948]: I0120 19:52:47.573927 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-9kr4w" Jan 20 19:52:49 crc kubenswrapper[4948]: I0120 19:52:49.007849 4948 generic.go:334] "Generic (PLEG): container finished" podID="663b6891-d5d0-4146-a751-3ef27b687254" containerID="48e487237171b12dc47b4f673b8367318e5dd900fc8c35a1348ffa3fa74cccb1" exitCode=0 Jan 20 19:52:49 crc kubenswrapper[4948]: I0120 19:52:49.007929 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" event={"ID":"663b6891-d5d0-4146-a751-3ef27b687254","Type":"ContainerDied","Data":"48e487237171b12dc47b4f673b8367318e5dd900fc8c35a1348ffa3fa74cccb1"} Jan 20 19:52:49 crc kubenswrapper[4948]: I0120 19:52:49.779317 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vxm8l"] Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.016161 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" event={"ID":"7c36b505-5b12-409d-a6cc-63c7ab827fec","Type":"ContainerStarted","Data":"78733da8e436856ad89bc8e5fe0dc5db88ece6739df841ddd4e3c6fa7001a80b"} Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.016438 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.022349 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.062315 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" podStartSLOduration=5.062297365 podStartE2EDuration="5.062297365s" podCreationTimestamp="2026-01-20 19:52:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:50.041875903 +0000 UTC m=+197.992600872" watchObservedRunningTime="2026-01-20 19:52:50.062297365 +0000 UTC m=+198.013022334" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.249771 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.249838 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.775658 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.871832 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/663b6891-d5d0-4146-a751-3ef27b687254-serving-cert\") pod \"663b6891-d5d0-4146-a751-3ef27b687254\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.871924 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lx5wl\" (UniqueName: \"kubernetes.io/projected/663b6891-d5d0-4146-a751-3ef27b687254-kube-api-access-lx5wl\") pod \"663b6891-d5d0-4146-a751-3ef27b687254\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.871953 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-proxy-ca-bundles\") pod \"663b6891-d5d0-4146-a751-3ef27b687254\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.871980 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-client-ca\") pod \"663b6891-d5d0-4146-a751-3ef27b687254\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.872042 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-config\") pod \"663b6891-d5d0-4146-a751-3ef27b687254\" (UID: \"663b6891-d5d0-4146-a751-3ef27b687254\") " Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.873287 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "663b6891-d5d0-4146-a751-3ef27b687254" (UID: "663b6891-d5d0-4146-a751-3ef27b687254"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.873470 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-config" (OuterVolumeSpecName: "config") pod "663b6891-d5d0-4146-a751-3ef27b687254" (UID: "663b6891-d5d0-4146-a751-3ef27b687254"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.873878 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-client-ca" (OuterVolumeSpecName: "client-ca") pod "663b6891-d5d0-4146-a751-3ef27b687254" (UID: "663b6891-d5d0-4146-a751-3ef27b687254"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.879087 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/663b6891-d5d0-4146-a751-3ef27b687254-kube-api-access-lx5wl" (OuterVolumeSpecName: "kube-api-access-lx5wl") pod "663b6891-d5d0-4146-a751-3ef27b687254" (UID: "663b6891-d5d0-4146-a751-3ef27b687254"). InnerVolumeSpecName "kube-api-access-lx5wl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.887952 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/663b6891-d5d0-4146-a751-3ef27b687254-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "663b6891-d5d0-4146-a751-3ef27b687254" (UID: "663b6891-d5d0-4146-a751-3ef27b687254"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.973242 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/663b6891-d5d0-4146-a751-3ef27b687254-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.973279 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lx5wl\" (UniqueName: \"kubernetes.io/projected/663b6891-d5d0-4146-a751-3ef27b687254-kube-api-access-lx5wl\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.973290 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.973299 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-client-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:50 crc kubenswrapper[4948]: I0120 19:52:50.973328 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/663b6891-d5d0-4146-a751-3ef27b687254-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:51 crc kubenswrapper[4948]: I0120 19:52:51.025260 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" Jan 20 19:52:51 crc kubenswrapper[4948]: I0120 19:52:51.025351 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-67c6b94b8c-zzm96" event={"ID":"663b6891-d5d0-4146-a751-3ef27b687254","Type":"ContainerDied","Data":"0c30595ff3138284e7845415b81c751eda688a74fb28be42f63a941f42a5a094"} Jan 20 19:52:51 crc kubenswrapper[4948]: I0120 19:52:51.025433 4948 scope.go:117] "RemoveContainer" containerID="48e487237171b12dc47b4f673b8367318e5dd900fc8c35a1348ffa3fa74cccb1" Jan 20 19:52:51 crc kubenswrapper[4948]: I0120 19:52:51.071136 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-67c6b94b8c-zzm96"] Jan 20 19:52:51 crc kubenswrapper[4948]: I0120 19:52:51.074436 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-67c6b94b8c-zzm96"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.210365 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fpw4g"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.219934 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m7lf9"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.220307 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-m7lf9" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" containerName="registry-server" containerID="cri-o://303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54" gracePeriod=30 Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.224411 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2hcgj"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.240958 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4l26k"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.309751 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbslp"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.309984 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" containerID="cri-o://bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6" gracePeriod=30 Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.314602 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lzft6"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.317481 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlfcl"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.329227 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-z8fwl"] Jan 20 19:52:52 crc kubenswrapper[4948]: E0120 19:52:52.329557 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="663b6891-d5d0-4146-a751-3ef27b687254" containerName="controller-manager" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.329569 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="663b6891-d5d0-4146-a751-3ef27b687254" 
containerName="controller-manager" Jan 20 19:52:52 crc kubenswrapper[4948]: E0120 19:52:52.329597 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e4a2cbe-b256-4833-865f-dea42e49f241" containerName="pruner" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.329603 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e4a2cbe-b256-4833-865f-dea42e49f241" containerName="pruner" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.329766 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e4a2cbe-b256-4833-865f-dea42e49f241" containerName="pruner" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.329804 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="663b6891-d5d0-4146-a751-3ef27b687254" containerName="controller-manager" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.330367 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.340448 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bslf8"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.350389 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-flwsw"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.357072 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-z8fwl"] Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.513988 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cf25c7d-e351-4a2e-8992-47542811fb1f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.514416 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7cf25c7d-e351-4a2e-8992-47542811fb1f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.514536 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhv9p\" (UniqueName: \"kubernetes.io/projected/7cf25c7d-e351-4a2e-8992-47542811fb1f-kube-api-access-bhv9p\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.615668 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhv9p\" (UniqueName: \"kubernetes.io/projected/7cf25c7d-e351-4a2e-8992-47542811fb1f-kube-api-access-bhv9p\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.615813 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/7cf25c7d-e351-4a2e-8992-47542811fb1f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.615874 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7cf25c7d-e351-4a2e-8992-47542811fb1f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.617439 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7cf25c7d-e351-4a2e-8992-47542811fb1f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.622615 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7cf25c7d-e351-4a2e-8992-47542811fb1f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.632142 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhv9p\" (UniqueName: \"kubernetes.io/projected/7cf25c7d-e351-4a2e-8992-47542811fb1f-kube-api-access-bhv9p\") pod \"marketplace-operator-79b997595-z8fwl\" (UID: \"7cf25c7d-e351-4a2e-8992-47542811fb1f\") " pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.705543 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="663b6891-d5d0-4146-a751-3ef27b687254" path="/var/lib/kubelet/pods/663b6891-d5d0-4146-a751-3ef27b687254/volumes" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.742833 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:52 crc kubenswrapper[4948]: I0120 19:52:52.984300 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.027978 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.075043 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-flwsw" event={"ID":"b73db843-a550-4d8e-8aa1-0d6ce047cefe","Type":"ContainerDied","Data":"3b205c44aebcb92f8d1578ef94f226a9bb35120612b0aba12ce9a7dfdf77dcc0"} Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.075104 4948 scope.go:117] "RemoveContainer" containerID="defb5cb985994e8f6c63ae9d8ae05aaa0ee2d3b1d2e5cdecba1f00f2df3ffcd5" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.075242 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-flwsw" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.128611 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-utilities\") pod \"0235a2ef-a094-4747-8aa5-581cb5f665a2\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.128790 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-catalog-content\") pod \"0235a2ef-a094-4747-8aa5-581cb5f665a2\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.129042 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v87x\" (UniqueName: \"kubernetes.io/projected/0235a2ef-a094-4747-8aa5-581cb5f665a2-kube-api-access-8v87x\") pod \"0235a2ef-a094-4747-8aa5-581cb5f665a2\" (UID: \"0235a2ef-a094-4747-8aa5-581cb5f665a2\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.133235 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-utilities" (OuterVolumeSpecName: "utilities") pod "0235a2ef-a094-4747-8aa5-581cb5f665a2" (UID: "0235a2ef-a094-4747-8aa5-581cb5f665a2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.133366 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0235a2ef-a094-4747-8aa5-581cb5f665a2" (UID: "0235a2ef-a094-4747-8aa5-581cb5f665a2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.143951 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0235a2ef-a094-4747-8aa5-581cb5f665a2-kube-api-access-8v87x" (OuterVolumeSpecName: "kube-api-access-8v87x") pod "0235a2ef-a094-4747-8aa5-581cb5f665a2" (UID: "0235a2ef-a094-4747-8aa5-581cb5f665a2"). InnerVolumeSpecName "kube-api-access-8v87x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.157802 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fpw4g" event={"ID":"0235a2ef-a094-4747-8aa5-581cb5f665a2","Type":"ContainerDied","Data":"a8adec5b2359f950454153a734f1b42c202274e8dd4d6e40699eec012d1841ca"} Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.157964 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fpw4g" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.162263 4948 scope.go:117] "RemoveContainer" containerID="1c0bd8a73d68263e8e7b2dc44b49cee342785962a6625b74a5bc48d3b39e6562" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.230908 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-catalog-content\") pod \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.231246 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-utilities\") pod \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.231362 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvx6q\" (UniqueName: \"kubernetes.io/projected/b73db843-a550-4d8e-8aa1-0d6ce047cefe-kube-api-access-lvx6q\") pod \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\" (UID: \"b73db843-a550-4d8e-8aa1-0d6ce047cefe\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.231656 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.231676 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0235a2ef-a094-4747-8aa5-581cb5f665a2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.231692 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v87x\" (UniqueName: \"kubernetes.io/projected/0235a2ef-a094-4747-8aa5-581cb5f665a2-kube-api-access-8v87x\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.232822 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b73db843-a550-4d8e-8aa1-0d6ce047cefe" (UID: "b73db843-a550-4d8e-8aa1-0d6ce047cefe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.234018 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-utilities" (OuterVolumeSpecName: "utilities") pod "b73db843-a550-4d8e-8aa1-0d6ce047cefe" (UID: "b73db843-a550-4d8e-8aa1-0d6ce047cefe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.235234 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fpw4g"] Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.236684 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b73db843-a550-4d8e-8aa1-0d6ce047cefe-kube-api-access-lvx6q" (OuterVolumeSpecName: "kube-api-access-lvx6q") pod "b73db843-a550-4d8e-8aa1-0d6ce047cefe" (UID: "b73db843-a550-4d8e-8aa1-0d6ce047cefe"). 
InnerVolumeSpecName "kube-api-access-lvx6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.242591 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fpw4g"] Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.245320 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.333234 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvx6q\" (UniqueName: \"kubernetes.io/projected/b73db843-a550-4d8e-8aa1-0d6ce047cefe-kube-api-access-lvx6q\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.333275 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.333288 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b73db843-a550-4d8e-8aa1-0d6ce047cefe-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.420281 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-flwsw"] Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.423222 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-flwsw"] Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.435077 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l8v99\" (UniqueName: \"kubernetes.io/projected/2dc4a3ea-7198-4d3c-a592-7734d229d481-kube-api-access-l8v99\") pod \"2dc4a3ea-7198-4d3c-a592-7734d229d481\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.435176 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-utilities\") pod \"2dc4a3ea-7198-4d3c-a592-7734d229d481\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.435239 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-catalog-content\") pod \"2dc4a3ea-7198-4d3c-a592-7734d229d481\" (UID: \"2dc4a3ea-7198-4d3c-a592-7734d229d481\") " Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.435748 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2dc4a3ea-7198-4d3c-a592-7734d229d481" (UID: "2dc4a3ea-7198-4d3c-a592-7734d229d481"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.436155 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-utilities" (OuterVolumeSpecName: "utilities") pod "2dc4a3ea-7198-4d3c-a592-7734d229d481" (UID: "2dc4a3ea-7198-4d3c-a592-7734d229d481"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.437916 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dc4a3ea-7198-4d3c-a592-7734d229d481-kube-api-access-l8v99" (OuterVolumeSpecName: "kube-api-access-l8v99") pod "2dc4a3ea-7198-4d3c-a592-7734d229d481" (UID: "2dc4a3ea-7198-4d3c-a592-7734d229d481"). InnerVolumeSpecName "kube-api-access-l8v99". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.537118 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l8v99\" (UniqueName: \"kubernetes.io/projected/2dc4a3ea-7198-4d3c-a592-7734d229d481-kube-api-access-l8v99\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.537160 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.537173 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dc4a3ea-7198-4d3c-a592-7734d229d481-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:53 crc kubenswrapper[4948]: W0120 19:52:53.587829 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7cf25c7d_e351_4a2e_8992_47542811fb1f.slice/crio-4033c70255005f17e4ec6ce6dc3be1d256e92931d7a8f84bf7e3371c596f5a7f WatchSource:0}: Error finding container 4033c70255005f17e4ec6ce6dc3be1d256e92931d7a8f84bf7e3371c596f5a7f: Status 404 returned error can't find the container with id 4033c70255005f17e4ec6ce6dc3be1d256e92931d7a8f84bf7e3371c596f5a7f Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.592180 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-z8fwl"] Jan 20 19:52:53 crc kubenswrapper[4948]: I0120 19:52:53.746013 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.138755 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h"] Jan 20 19:52:54 crc kubenswrapper[4948]: E0120 19:52:54.139320 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b73db843-a550-4d8e-8aa1-0d6ce047cefe" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.139370 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b73db843-a550-4d8e-8aa1-0d6ce047cefe" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: E0120 19:52:54.139399 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dc4a3ea-7198-4d3c-a592-7734d229d481" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.139408 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dc4a3ea-7198-4d3c-a592-7734d229d481" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: E0120 19:52:54.139425 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0235a2ef-a094-4747-8aa5-581cb5f665a2" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.139433 4948 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="0235a2ef-a094-4747-8aa5-581cb5f665a2" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.139566 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dc4a3ea-7198-4d3c-a592-7734d229d481" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.139582 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b73db843-a550-4d8e-8aa1-0d6ce047cefe" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.139594 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0235a2ef-a094-4747-8aa5-581cb5f665a2" containerName="extract-utilities" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.140207 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.142210 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.142878 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.143018 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.143457 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.146754 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8f09ba9-24f6-472e-8d51-9991c732386b-serving-cert\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.146829 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-proxy-ca-bundles\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.146875 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-config\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.146918 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-client-ca\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.146956 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7lvb\" 
(UniqueName: \"kubernetes.io/projected/f8f09ba9-24f6-472e-8d51-9991c732386b-kube-api-access-f7lvb\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.147246 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h"] Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.150583 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.151577 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.171664 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.180408 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" event={"ID":"7cf25c7d-e351-4a2e-8992-47542811fb1f","Type":"ContainerStarted","Data":"4033c70255005f17e4ec6ce6dc3be1d256e92931d7a8f84bf7e3371c596f5a7f"} Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.184107 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lzft6" event={"ID":"2dc4a3ea-7198-4d3c-a592-7734d229d481","Type":"ContainerDied","Data":"a8e545883330fe15952d5347da65f706486ac70cf1e7c82b60d322486f2bee73"} Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.184154 4948 scope.go:117] "RemoveContainer" containerID="1ab669a3f8b548dca77f3f93943091b7d6cfea5254e61b0f5f144617eeefdd6f" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.184250 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lzft6" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.242278 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lzft6"] Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.245295 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lzft6"] Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.247615 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8f09ba9-24f6-472e-8d51-9991c732386b-serving-cert\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.247811 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-proxy-ca-bundles\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.247933 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-config\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.248085 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-client-ca\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.248227 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7lvb\" (UniqueName: \"kubernetes.io/projected/f8f09ba9-24f6-472e-8d51-9991c732386b-kube-api-access-f7lvb\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.249137 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-proxy-ca-bundles\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.249687 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-client-ca\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.249812 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-config\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.251475 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8f09ba9-24f6-472e-8d51-9991c732386b-serving-cert\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.266512 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7lvb\" (UniqueName: \"kubernetes.io/projected/f8f09ba9-24f6-472e-8d51-9991c732386b-kube-api-access-f7lvb\") pod \"controller-manager-6c75f5bc9c-bkb4h\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") " pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.469975 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.588768 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0235a2ef-a094-4747-8aa5-581cb5f665a2" path="/var/lib/kubelet/pods/0235a2ef-a094-4747-8aa5-581cb5f665a2/volumes" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.589851 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dc4a3ea-7198-4d3c-a592-7734d229d481" path="/var/lib/kubelet/pods/2dc4a3ea-7198-4d3c-a592-7734d229d481/volumes" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.590292 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b73db843-a550-4d8e-8aa1-0d6ce047cefe" path="/var/lib/kubelet/pods/b73db843-a550-4d8e-8aa1-0d6ce047cefe/volumes" Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.909037 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h"] Jan 20 19:52:54 crc kubenswrapper[4948]: W0120 19:52:54.925032 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf8f09ba9_24f6_472e_8d51_9991c732386b.slice/crio-dc4f903532d5044e99e79963bd4e44b20f99697a42b544372bddb4c5593d9c7a WatchSource:0}: Error finding container dc4f903532d5044e99e79963bd4e44b20f99697a42b544372bddb4c5593d9c7a: Status 404 returned error can't find the container with id dc4f903532d5044e99e79963bd4e44b20f99697a42b544372bddb4c5593d9c7a Jan 20 19:52:54 crc kubenswrapper[4948]: I0120 19:52:54.942107 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.072867 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-trusted-ca\") pod \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.072959 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ftbm\" (UniqueName: \"kubernetes.io/projected/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-kube-api-access-2ftbm\") pod \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.073018 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-operator-metrics\") pod \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\" (UID: \"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f\") " Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.074107 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" (UID: "1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.087897 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-kube-api-access-2ftbm" (OuterVolumeSpecName: "kube-api-access-2ftbm") pod "1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" (UID: "1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f"). InnerVolumeSpecName "kube-api-access-2ftbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.088607 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" (UID: "1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.174135 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ftbm\" (UniqueName: \"kubernetes.io/projected/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-kube-api-access-2ftbm\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.174164 4948 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.174175 4948 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.213687 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.242309 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hsxfw"] Jan 20 19:52:55 crc kubenswrapper[4948]: E0120 19:52:55.242531 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.242542 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" Jan 20 19:52:55 crc kubenswrapper[4948]: E0120 19:52:55.242556 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" containerName="extract-utilities" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.242562 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" containerName="extract-utilities" Jan 20 19:52:55 crc kubenswrapper[4948]: E0120 19:52:55.242574 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" containerName="extract-content" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.242581 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" containerName="extract-content" Jan 20 19:52:55 crc kubenswrapper[4948]: E0120 19:52:55.242589 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" containerName="registry-server" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.242594 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" containerName="registry-server" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.242686 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" containerName="registry-server" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.242723 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerName="marketplace-operator" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.243583 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.314118 4948 generic.go:334] "Generic (PLEG): container finished" podID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" containerID="bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6" exitCode=0 Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.314223 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" event={"ID":"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f","Type":"ContainerDied","Data":"bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.314251 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" event={"ID":"1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f","Type":"ContainerDied","Data":"2d1e4e93ea5cbe0174b2009e834aa6e18c274933e64ef3f3f69484b8f786ffd3"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.314268 4948 scope.go:117] "RemoveContainer" containerID="bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.314362 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bbslp" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.334048 4948 generic.go:334] "Generic (PLEG): container finished" podID="a443e18f-462b-4c81-9f70-3bae303f278f" containerID="303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54" exitCode=0 Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.334432 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m7lf9" event={"ID":"a443e18f-462b-4c81-9f70-3bae303f278f","Type":"ContainerDied","Data":"303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.334475 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-m7lf9" event={"ID":"a443e18f-462b-4c81-9f70-3bae303f278f","Type":"ContainerDied","Data":"2346d161d11be9382e639a13a4a2ad0347b94fb675f749934d4db9a83ae7815c"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.334567 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-m7lf9" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.338780 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" event={"ID":"7cf25c7d-e351-4a2e-8992-47542811fb1f","Type":"ContainerStarted","Data":"648d0751e6ca0869747efc4dab3723b1746735080e4a0ef47ce408aaa4545e5f"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.340082 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.344604 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4l26k" event={"ID":"4e87b4cc-edb1-4541-aff1-83012069d55c","Type":"ContainerStarted","Data":"a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.344956 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4l26k" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" containerName="extract-content" containerID="cri-o://a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446" gracePeriod=30 Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.353473 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" event={"ID":"f8f09ba9-24f6-472e-8d51-9991c732386b","Type":"ContainerStarted","Data":"f8ec1e4f4846fa5100309825dcadf9f0f2559220ca2987aef70803f39844768d"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.353528 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" event={"ID":"f8f09ba9-24f6-472e-8d51-9991c732386b","Type":"ContainerStarted","Data":"dc4f903532d5044e99e79963bd4e44b20f99697a42b544372bddb4c5593d9c7a"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.354975 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.356097 4948 patch_prober.go:28] interesting pod/controller-manager-6c75f5bc9c-bkb4h container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.65:8443/healthz\": dial tcp 10.217.0.65:8443: connect: connection refused" start-of-body= Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.356140 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" podUID="f8f09ba9-24f6-472e-8d51-9991c732386b" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.65:8443/healthz\": dial tcp 10.217.0.65:8443: connect: connection refused" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.360938 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlfcl" event={"ID":"4c19381d-95b1-4813-8625-da98f07c486f","Type":"ContainerStarted","Data":"56cb771c8ed5e83a35ba17ba0aff8abe79276c9e31afa6d67c449bbfba82a9a3"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.361218 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rlfcl" podUID="4c19381d-95b1-4813-8625-da98f07c486f" containerName="extract-content" 
containerID="cri-o://56cb771c8ed5e83a35ba17ba0aff8abe79276c9e31afa6d67c449bbfba82a9a3" gracePeriod=30 Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.367599 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bslf8" event={"ID":"31d44844-4319-4456-b6cc-88135734f548","Type":"ContainerStarted","Data":"2df8167685b9300b840aa951c1049b00090865781790408ab6b60c7c04e72d67"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.367927 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bslf8" podUID="31d44844-4319-4456-b6cc-88135734f548" containerName="extract-content" containerID="cri-o://2df8167685b9300b840aa951c1049b00090865781790408ab6b60c7c04e72d67" gracePeriod=30 Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.370081 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.381469 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hsxfw"] Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.391289 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-utilities\") pod \"a443e18f-462b-4c81-9f70-3bae303f278f\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.391344 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk4wx\" (UniqueName: \"kubernetes.io/projected/a443e18f-462b-4c81-9f70-3bae303f278f-kube-api-access-mk4wx\") pod \"a443e18f-462b-4c81-9f70-3bae303f278f\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.391368 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-catalog-content\") pod \"a443e18f-462b-4c81-9f70-3bae303f278f\" (UID: \"a443e18f-462b-4c81-9f70-3bae303f278f\") " Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.391456 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8d1e5d7-2511-47ad-b240-677792863a32-utilities\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.391492 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blkgl\" (UniqueName: \"kubernetes.io/projected/f8d1e5d7-2511-47ad-b240-677792863a32-kube-api-access-blkgl\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.391530 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8d1e5d7-2511-47ad-b240-677792863a32-catalog-content\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.392838 4948 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2hcgj" event={"ID":"aa1c9624-c789-4df8-8c32-eb95e7c40690","Type":"ContainerStarted","Data":"343ee5ee62efaf61a02e6e54deee401f699587e7ab40c46a87370d412b68149f"} Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.392980 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-utilities" (OuterVolumeSpecName: "utilities") pod "a443e18f-462b-4c81-9f70-3bae303f278f" (UID: "a443e18f-462b-4c81-9f70-3bae303f278f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.393041 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2hcgj" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" containerName="extract-content" containerID="cri-o://343ee5ee62efaf61a02e6e54deee401f699587e7ab40c46a87370d412b68149f" gracePeriod=30 Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.557729 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8d1e5d7-2511-47ad-b240-677792863a32-utilities\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.557844 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blkgl\" (UniqueName: \"kubernetes.io/projected/f8d1e5d7-2511-47ad-b240-677792863a32-kube-api-access-blkgl\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.557885 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8d1e5d7-2511-47ad-b240-677792863a32-catalog-content\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.558030 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.558789 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8d1e5d7-2511-47ad-b240-677792863a32-catalog-content\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.563417 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8d1e5d7-2511-47ad-b240-677792863a32-utilities\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.564980 4948 scope.go:117] "RemoveContainer" containerID="bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6" Jan 20 19:52:55 crc kubenswrapper[4948]: E0120 19:52:55.566238 4948 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6\": container with ID starting with bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6 not found: ID does not exist" containerID="bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.574228 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6"} err="failed to get container status \"bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6\": rpc error: code = NotFound desc = could not find container \"bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6\": container with ID starting with bcc0fbfddccb9a6eb9a0a0afc19556337fb6d55f391629a3bcbabbbe866559a6 not found: ID does not exist" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.574307 4948 scope.go:117] "RemoveContainer" containerID="303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.583597 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a443e18f-462b-4c81-9f70-3bae303f278f-kube-api-access-mk4wx" (OuterVolumeSpecName: "kube-api-access-mk4wx") pod "a443e18f-462b-4c81-9f70-3bae303f278f" (UID: "a443e18f-462b-4c81-9f70-3bae303f278f"). InnerVolumeSpecName "kube-api-access-mk4wx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.620448 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" podStartSLOduration=10.620420559 podStartE2EDuration="10.620420559s" podCreationTimestamp="2026-01-20 19:52:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:55.570112325 +0000 UTC m=+203.520837294" watchObservedRunningTime="2026-01-20 19:52:55.620420559 +0000 UTC m=+203.571145538" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.634844 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blkgl\" (UniqueName: \"kubernetes.io/projected/f8d1e5d7-2511-47ad-b240-677792863a32-kube-api-access-blkgl\") pod \"redhat-marketplace-hsxfw\" (UID: \"f8d1e5d7-2511-47ad-b240-677792863a32\") " pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.653974 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" podStartSLOduration=3.653950848 podStartE2EDuration="3.653950848s" podCreationTimestamp="2026-01-20 19:52:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:52:55.632511765 +0000 UTC m=+203.583236734" watchObservedRunningTime="2026-01-20 19:52:55.653950848 +0000 UTC m=+203.604675817" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.661058 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk4wx\" (UniqueName: \"kubernetes.io/projected/a443e18f-462b-4c81-9f70-3bae303f278f-kube-api-access-mk4wx\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.715741 4948 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a443e18f-462b-4c81-9f70-3bae303f278f" (UID: "a443e18f-462b-4c81-9f70-3bae303f278f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.750010 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.760805 4948 scope.go:117] "RemoveContainer" containerID="321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.762463 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a443e18f-462b-4c81-9f70-3bae303f278f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.765754 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbslp"] Jan 20 19:52:55 crc kubenswrapper[4948]: I0120 19:52:55.906551 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbslp"] Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.058056 4948 scope.go:117] "RemoveContainer" containerID="e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.096551 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-m7lf9"] Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.103719 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-m7lf9"] Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.118904 4948 scope.go:117] "RemoveContainer" containerID="303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54" Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.119628 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54\": container with ID starting with 303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54 not found: ID does not exist" containerID="303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.119664 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54"} err="failed to get container status \"303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54\": rpc error: code = NotFound desc = could not find container \"303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54\": container with ID starting with 303762a74e7ce23ba45d80f0461b4ce4f72c99f79239037c892c6a181f37ab54 not found: ID does not exist" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.119693 4948 scope.go:117] "RemoveContainer" containerID="321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927" Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.146588 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927\": container with ID starting with 321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927 not found: ID does not exist" containerID="321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.146633 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927"} err="failed to get container status \"321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927\": rpc error: code = NotFound desc = could not find container \"321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927\": container with ID starting with 321ebbff3d249388209446c22100c991ec2c62981ad852d6eb0f9cf19aade927 not found: ID does not exist" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.146660 4948 scope.go:117] "RemoveContainer" containerID="e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817" Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.148217 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817\": container with ID starting with e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817 not found: ID does not exist" containerID="e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.148273 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817"} err="failed to get container status \"e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817\": rpc error: code = NotFound desc = could not find container \"e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817\": container with ID starting with e4de5ffa35a8cbe0783cf61663f3dd0d44a8bf8a17b0de53c09e4cddbd683817 not found: ID does not exist" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.414040 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l26k_4e87b4cc-edb1-4541-aff1-83012069d55c/extract-content/0.log" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.414480 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.419017 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2hcgj_aa1c9624-c789-4df8-8c32-eb95e7c40690/extract-content/0.log" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.419329 4948 generic.go:334] "Generic (PLEG): container finished" podID="aa1c9624-c789-4df8-8c32-eb95e7c40690" containerID="343ee5ee62efaf61a02e6e54deee401f699587e7ab40c46a87370d412b68149f" exitCode=2 Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.419379 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2hcgj" event={"ID":"aa1c9624-c789-4df8-8c32-eb95e7c40690","Type":"ContainerDied","Data":"343ee5ee62efaf61a02e6e54deee401f699587e7ab40c46a87370d412b68149f"} Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.421145 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rlfcl_4c19381d-95b1-4813-8625-da98f07c486f/extract-content/0.log" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.421380 4948 generic.go:334] "Generic (PLEG): container finished" podID="4c19381d-95b1-4813-8625-da98f07c486f" containerID="56cb771c8ed5e83a35ba17ba0aff8abe79276c9e31afa6d67c449bbfba82a9a3" exitCode=2 Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.421432 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlfcl" event={"ID":"4c19381d-95b1-4813-8625-da98f07c486f","Type":"ContainerDied","Data":"56cb771c8ed5e83a35ba17ba0aff8abe79276c9e31afa6d67c449bbfba82a9a3"} Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.422318 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bslf8_31d44844-4319-4456-b6cc-88135734f548/extract-content/0.log" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.422559 4948 generic.go:334] "Generic (PLEG): container finished" podID="31d44844-4319-4456-b6cc-88135734f548" containerID="2df8167685b9300b840aa951c1049b00090865781790408ab6b60c7c04e72d67" exitCode=2 Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.422592 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bslf8" event={"ID":"31d44844-4319-4456-b6cc-88135734f548","Type":"ContainerDied","Data":"2df8167685b9300b840aa951c1049b00090865781790408ab6b60c7c04e72d67"} Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.423560 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-4l26k_4e87b4cc-edb1-4541-aff1-83012069d55c/extract-content/0.log" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.423863 4948 generic.go:334] "Generic (PLEG): container finished" podID="4e87b4cc-edb1-4541-aff1-83012069d55c" containerID="a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446" exitCode=2 Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.424925 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4l26k" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.425050 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4l26k" event={"ID":"4e87b4cc-edb1-4541-aff1-83012069d55c","Type":"ContainerDied","Data":"a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446"} Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.425068 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4l26k" event={"ID":"4e87b4cc-edb1-4541-aff1-83012069d55c","Type":"ContainerDied","Data":"7aa2ede1634ac35be7f36c7e80da7ab008dab510bc76fd9bdcae0d6ab2edea23"} Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.425083 4948 scope.go:117] "RemoveContainer" containerID="a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.472829 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.484093 4948 scope.go:117] "RemoveContainer" containerID="d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.553360 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-utilities\") pod \"4e87b4cc-edb1-4541-aff1-83012069d55c\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.553620 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-catalog-content\") pod \"4e87b4cc-edb1-4541-aff1-83012069d55c\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.553680 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4k45\" (UniqueName: \"kubernetes.io/projected/4e87b4cc-edb1-4541-aff1-83012069d55c-kube-api-access-h4k45\") pod \"4e87b4cc-edb1-4541-aff1-83012069d55c\" (UID: \"4e87b4cc-edb1-4541-aff1-83012069d55c\") " Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.555150 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-utilities" (OuterVolumeSpecName: "utilities") pod "4e87b4cc-edb1-4541-aff1-83012069d55c" (UID: "4e87b4cc-edb1-4541-aff1-83012069d55c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.579454 4948 scope.go:117] "RemoveContainer" containerID="a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.580519 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f" path="/var/lib/kubelet/pods/1ff0e5ae-c999-4d3d-a8ac-14796ee0b95f/volumes" Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.583681 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446\": container with ID starting with a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446 not found: ID does not exist" containerID="a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.584792 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446"} err="failed to get container status \"a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446\": rpc error: code = NotFound desc = could not find container \"a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446\": container with ID starting with a6c63eebaf6b875ed2154e9aa1b29466221132e87eac7815d2974e94625bf446 not found: ID does not exist" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.584860 4948 scope.go:117] "RemoveContainer" containerID="d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.583826 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e87b4cc-edb1-4541-aff1-83012069d55c-kube-api-access-h4k45" (OuterVolumeSpecName: "kube-api-access-h4k45") pod "4e87b4cc-edb1-4541-aff1-83012069d55c" (UID: "4e87b4cc-edb1-4541-aff1-83012069d55c"). InnerVolumeSpecName "kube-api-access-h4k45". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.584129 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e87b4cc-edb1-4541-aff1-83012069d55c" (UID: "4e87b4cc-edb1-4541-aff1-83012069d55c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.586262 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e\": container with ID starting with d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e not found: ID does not exist" containerID="d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.586293 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e"} err="failed to get container status \"d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e\": rpc error: code = NotFound desc = could not find container \"d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e\": container with ID starting with d55b3951af12232c3727e3421af08bee7a961cb646d6b22ef6a9dad22a7b436e not found: ID does not exist" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.586328 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a443e18f-462b-4c81-9f70-3bae303f278f" path="/var/lib/kubelet/pods/a443e18f-462b-4c81-9f70-3bae303f278f/volumes" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.598379 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2hcgj_aa1c9624-c789-4df8-8c32-eb95e7c40690/extract-content/0.log" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.598804 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2hcgj" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.639671 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kpqs5"] Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.640877 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" containerName="extract-utilities" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.641024 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" containerName="extract-utilities" Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.641122 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" containerName="extract-content" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.641242 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" containerName="extract-content" Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.641368 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" containerName="extract-content" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.641505 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" containerName="extract-content" Jan 20 19:52:56 crc kubenswrapper[4948]: E0120 19:52:56.641644 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" containerName="extract-utilities" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.641749 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" 
containerName="extract-utilities" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.647105 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" containerName="extract-content" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.647366 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" containerName="extract-content" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.648625 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kpqs5" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.655601 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.655861 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e87b4cc-edb1-4541-aff1-83012069d55c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.655877 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4k45\" (UniqueName: \"kubernetes.io/projected/4e87b4cc-edb1-4541-aff1-83012069d55c-kube-api-access-h4k45\") on node \"crc\" DevicePath \"\"" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.660908 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kpqs5"] Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.699587 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bslf8_31d44844-4319-4456-b6cc-88135734f548/extract-content/0.log" Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.700517 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bslf8"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.758084 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvz5r\" (UniqueName: \"kubernetes.io/projected/aa1c9624-c789-4df8-8c32-eb95e7c40690-kube-api-access-hvz5r\") pod \"aa1c9624-c789-4df8-8c32-eb95e7c40690\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") "
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.758258 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-catalog-content\") pod \"aa1c9624-c789-4df8-8c32-eb95e7c40690\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") "
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.758324 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-utilities\") pod \"aa1c9624-c789-4df8-8c32-eb95e7c40690\" (UID: \"aa1c9624-c789-4df8-8c32-eb95e7c40690\") "
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.758502 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29572b48-7ca5-4e09-83d8-dcf2cc40682b-catalog-content\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.758556 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlgp2\" (UniqueName: \"kubernetes.io/projected/29572b48-7ca5-4e09-83d8-dcf2cc40682b-kube-api-access-nlgp2\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.758577 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29572b48-7ca5-4e09-83d8-dcf2cc40682b-utilities\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.760627 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-utilities" (OuterVolumeSpecName: "utilities") pod "aa1c9624-c789-4df8-8c32-eb95e7c40690" (UID: "aa1c9624-c789-4df8-8c32-eb95e7c40690"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.764945 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa1c9624-c789-4df8-8c32-eb95e7c40690-kube-api-access-hvz5r" (OuterVolumeSpecName: "kube-api-access-hvz5r") pod "aa1c9624-c789-4df8-8c32-eb95e7c40690" (UID: "aa1c9624-c789-4df8-8c32-eb95e7c40690"). InnerVolumeSpecName "kube-api-access-hvz5r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.776242 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hsxfw"]
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.790147 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aa1c9624-c789-4df8-8c32-eb95e7c40690" (UID: "aa1c9624-c789-4df8-8c32-eb95e7c40690"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.812439 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4l26k"]
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.818516 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4l26k"]
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.859425 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-utilities\") pod \"31d44844-4319-4456-b6cc-88135734f548\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") "
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.859517 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtfgl\" (UniqueName: \"kubernetes.io/projected/31d44844-4319-4456-b6cc-88135734f548-kube-api-access-gtfgl\") pod \"31d44844-4319-4456-b6cc-88135734f548\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") "
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.859590 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-catalog-content\") pod \"31d44844-4319-4456-b6cc-88135734f548\" (UID: \"31d44844-4319-4456-b6cc-88135734f548\") "
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.860324 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29572b48-7ca5-4e09-83d8-dcf2cc40682b-catalog-content\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.860406 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlgp2\" (UniqueName: \"kubernetes.io/projected/29572b48-7ca5-4e09-83d8-dcf2cc40682b-kube-api-access-nlgp2\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.860441 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29572b48-7ca5-4e09-83d8-dcf2cc40682b-utilities\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.860484 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.860512 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvz5r\" (UniqueName: \"kubernetes.io/projected/aa1c9624-c789-4df8-8c32-eb95e7c40690-kube-api-access-hvz5r\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.860527 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa1c9624-c789-4df8-8c32-eb95e7c40690-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.861064 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29572b48-7ca5-4e09-83d8-dcf2cc40682b-utilities\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.861205 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29572b48-7ca5-4e09-83d8-dcf2cc40682b-catalog-content\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.861327 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-utilities" (OuterVolumeSpecName: "utilities") pod "31d44844-4319-4456-b6cc-88135734f548" (UID: "31d44844-4319-4456-b6cc-88135734f548"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.867052 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d44844-4319-4456-b6cc-88135734f548-kube-api-access-gtfgl" (OuterVolumeSpecName: "kube-api-access-gtfgl") pod "31d44844-4319-4456-b6cc-88135734f548" (UID: "31d44844-4319-4456-b6cc-88135734f548"). InnerVolumeSpecName "kube-api-access-gtfgl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.867926 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31d44844-4319-4456-b6cc-88135734f548" (UID: "31d44844-4319-4456-b6cc-88135734f548"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.880652 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlgp2\" (UniqueName: \"kubernetes.io/projected/29572b48-7ca5-4e09-83d8-dcf2cc40682b-kube-api-access-nlgp2\") pod \"redhat-operators-kpqs5\" (UID: \"29572b48-7ca5-4e09-83d8-dcf2cc40682b\") " pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.885011 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rlfcl_4c19381d-95b1-4813-8625-da98f07c486f/extract-content/0.log"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.885309 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rlfcl"
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.962204 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtfgl\" (UniqueName: \"kubernetes.io/projected/31d44844-4319-4456-b6cc-88135734f548-kube-api-access-gtfgl\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.962278 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.962313 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31d44844-4319-4456-b6cc-88135734f548-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:56 crc kubenswrapper[4948]: I0120 19:52:56.988014 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kpqs5"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.063636 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-utilities\") pod \"4c19381d-95b1-4813-8625-da98f07c486f\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") "
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.063997 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-catalog-content\") pod \"4c19381d-95b1-4813-8625-da98f07c486f\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") "
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.064050 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bc6k\" (UniqueName: \"kubernetes.io/projected/4c19381d-95b1-4813-8625-da98f07c486f-kube-api-access-6bc6k\") pod \"4c19381d-95b1-4813-8625-da98f07c486f\" (UID: \"4c19381d-95b1-4813-8625-da98f07c486f\") "
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.065368 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-utilities" (OuterVolumeSpecName: "utilities") pod "4c19381d-95b1-4813-8625-da98f07c486f" (UID: "4c19381d-95b1-4813-8625-da98f07c486f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.069566 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c19381d-95b1-4813-8625-da98f07c486f-kube-api-access-6bc6k" (OuterVolumeSpecName: "kube-api-access-6bc6k") pod "4c19381d-95b1-4813-8625-da98f07c486f" (UID: "4c19381d-95b1-4813-8625-da98f07c486f"). InnerVolumeSpecName "kube-api-access-6bc6k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.095615 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c19381d-95b1-4813-8625-da98f07c486f" (UID: "4c19381d-95b1-4813-8625-da98f07c486f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.165282 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.165328 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c19381d-95b1-4813-8625-da98f07c486f-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.165344 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bc6k\" (UniqueName: \"kubernetes.io/projected/4c19381d-95b1-4813-8625-da98f07c486f-kube-api-access-6bc6k\") on node \"crc\" DevicePath \"\""
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.209607 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kpqs5"]
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.434103 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-2hcgj_aa1c9624-c789-4df8-8c32-eb95e7c40690/extract-content/0.log"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.434678 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2hcgj"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.437921 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2hcgj" event={"ID":"aa1c9624-c789-4df8-8c32-eb95e7c40690","Type":"ContainerDied","Data":"87073af38e2238e60ce135e7404510b7ddda43a21dc55b4e7adf10457c96e76f"}
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.437994 4948 scope.go:117] "RemoveContainer" containerID="343ee5ee62efaf61a02e6e54deee401f699587e7ab40c46a87370d412b68149f"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.446172 4948 generic.go:334] "Generic (PLEG): container finished" podID="f8d1e5d7-2511-47ad-b240-677792863a32" containerID="baaa20bf93156ecf4493ea4da12d73bb25960f6941d5582e62591c8e344f5466" exitCode=0
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.446282 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hsxfw" event={"ID":"f8d1e5d7-2511-47ad-b240-677792863a32","Type":"ContainerDied","Data":"baaa20bf93156ecf4493ea4da12d73bb25960f6941d5582e62591c8e344f5466"}
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.446327 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hsxfw" event={"ID":"f8d1e5d7-2511-47ad-b240-677792863a32","Type":"ContainerStarted","Data":"f5a05a79536dffd7f4d92deb7d03dbcf2d2a89cc110e84ffeceaf7420bc2209f"}
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.450916 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-rlfcl_4c19381d-95b1-4813-8625-da98f07c486f/extract-content/0.log"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.452583 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rlfcl" event={"ID":"4c19381d-95b1-4813-8625-da98f07c486f","Type":"ContainerDied","Data":"2142dac462589be407d179441d186027072d6c86e46c2d2e1bef177fd730a575"}
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.452856 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rlfcl"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.458676 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-bslf8_31d44844-4319-4456-b6cc-88135734f548/extract-content/0.log"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.459283 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bslf8" event={"ID":"31d44844-4319-4456-b6cc-88135734f548","Type":"ContainerDied","Data":"272d5887154707aaae1ab5da235f320672d4d8739945b612ffaeb8a735869c50"}
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.459316 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bslf8"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.462394 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kpqs5" event={"ID":"29572b48-7ca5-4e09-83d8-dcf2cc40682b","Type":"ContainerStarted","Data":"bcf0d4a1075403bd7e4dca5168ccf74c7df8ac3218ab7e9ce9ba53ceb1cce091"}
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.477057 4948 scope.go:117] "RemoveContainer" containerID="d2d7dbeba7f7e26b3179720b734d5edd1232b915fcf79577b96868f1c376ae0d"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.502990 4948 scope.go:117] "RemoveContainer" containerID="56cb771c8ed5e83a35ba17ba0aff8abe79276c9e31afa6d67c449bbfba82a9a3"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.543227 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2hcgj"]
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.550283 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2hcgj"]
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.554388 4948 scope.go:117] "RemoveContainer" containerID="5df219bcf3bf34ace0059c10bcf5c1b860d2c58a0b94c73a3b88bb626fb0d4ed"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.563034 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlfcl"]
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.565541 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rlfcl"]
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.573270 4948 scope.go:117] "RemoveContainer" containerID="2df8167685b9300b840aa951c1049b00090865781790408ab6b60c7c04e72d67"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.592960 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bslf8"]
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.599898 4948 scope.go:117] "RemoveContainer" containerID="0ac19e29261806836443b8a565fb019d18ec78f44ab11da9f1aff47b7c84650a"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.603958 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bslf8"]
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.619687 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h2jd7"]
Jan 20 19:52:57 crc kubenswrapper[4948]: E0120 19:52:57.620159 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c19381d-95b1-4813-8625-da98f07c486f" containerName="extract-content"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.620178 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c19381d-95b1-4813-8625-da98f07c486f" containerName="extract-content"
Jan 20 19:52:57 crc kubenswrapper[4948]: E0120 19:52:57.620197 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31d44844-4319-4456-b6cc-88135734f548" containerName="extract-content"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.620203 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="31d44844-4319-4456-b6cc-88135734f548" containerName="extract-content"
Jan 20 19:52:57 crc kubenswrapper[4948]: E0120 19:52:57.620215 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31d44844-4319-4456-b6cc-88135734f548" containerName="extract-utilities"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.620220 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="31d44844-4319-4456-b6cc-88135734f548" containerName="extract-utilities"
Jan 20 19:52:57 crc kubenswrapper[4948]: E0120 19:52:57.620229 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c19381d-95b1-4813-8625-da98f07c486f" containerName="extract-utilities"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.620235 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c19381d-95b1-4813-8625-da98f07c486f" containerName="extract-utilities"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.620358 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c19381d-95b1-4813-8625-da98f07c486f" containerName="extract-content"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.620447 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="31d44844-4319-4456-b6cc-88135734f548" containerName="extract-content"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.629996 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.635196 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h2jd7"]
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.637142 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.773977 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52223d24-be7c-4761-8f46-efcc30f37f8b-catalog-content\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.774190 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52223d24-be7c-4761-8f46-efcc30f37f8b-utilities\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.774319 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6m7w\" (UniqueName: \"kubernetes.io/projected/52223d24-be7c-4761-8f46-efcc30f37f8b-kube-api-access-z6m7w\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.875892 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52223d24-be7c-4761-8f46-efcc30f37f8b-utilities\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.875977 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6m7w\" (UniqueName: \"kubernetes.io/projected/52223d24-be7c-4761-8f46-efcc30f37f8b-kube-api-access-z6m7w\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.876207 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52223d24-be7c-4761-8f46-efcc30f37f8b-catalog-content\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.876728 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52223d24-be7c-4761-8f46-efcc30f37f8b-utilities\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.876755 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52223d24-be7c-4761-8f46-efcc30f37f8b-catalog-content\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.912952 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6m7w\" (UniqueName: \"kubernetes.io/projected/52223d24-be7c-4761-8f46-efcc30f37f8b-kube-api-access-z6m7w\") pod \"community-operators-h2jd7\" (UID: \"52223d24-be7c-4761-8f46-efcc30f37f8b\") " pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:57 crc kubenswrapper[4948]: I0120 19:52:57.961928 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h2jd7"
Jan 20 19:52:58 crc kubenswrapper[4948]: I0120 19:52:58.187039 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h2jd7"]
Jan 20 19:52:58 crc kubenswrapper[4948]: I0120 19:52:58.473007 4948 generic.go:334] "Generic (PLEG): container finished" podID="29572b48-7ca5-4e09-83d8-dcf2cc40682b" containerID="bb134bea8890ca6fec19a312483445a1ba780b633a6f2da3e8434b51cb2d417c" exitCode=0
Jan 20 19:52:58 crc kubenswrapper[4948]: I0120 19:52:58.473087 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kpqs5" event={"ID":"29572b48-7ca5-4e09-83d8-dcf2cc40682b","Type":"ContainerDied","Data":"bb134bea8890ca6fec19a312483445a1ba780b633a6f2da3e8434b51cb2d417c"}
Jan 20 19:52:58 crc kubenswrapper[4948]: I0120 19:52:58.475882 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2jd7" event={"ID":"52223d24-be7c-4761-8f46-efcc30f37f8b","Type":"ContainerStarted","Data":"fcf8a9e83866ca8571fec67f5d466533a809a745b14a9e9fb4b29312f9ec7a48"}
Jan 20 19:52:58 crc kubenswrapper[4948]: I0120 19:52:58.582445 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d44844-4319-4456-b6cc-88135734f548" path="/var/lib/kubelet/pods/31d44844-4319-4456-b6cc-88135734f548/volumes"
Jan 20 19:52:58 crc kubenswrapper[4948]: I0120 19:52:58.583452 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c19381d-95b1-4813-8625-da98f07c486f" path="/var/lib/kubelet/pods/4c19381d-95b1-4813-8625-da98f07c486f/volumes"
Jan 20 19:52:58 crc kubenswrapper[4948]: I0120 19:52:58.584232 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e87b4cc-edb1-4541-aff1-83012069d55c" path="/var/lib/kubelet/pods/4e87b4cc-edb1-4541-aff1-83012069d55c/volumes"
Jan 20 19:52:58 crc kubenswrapper[4948]: I0120 19:52:58.585527 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa1c9624-c789-4df8-8c32-eb95e7c40690" path="/var/lib/kubelet/pods/aa1c9624-c789-4df8-8c32-eb95e7c40690/volumes"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.008278 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cpztv"]
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.009446 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.011417 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.023593 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cpztv"]
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.193978 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kglj\" (UniqueName: \"kubernetes.io/projected/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-kube-api-access-4kglj\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.194529 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-utilities\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.194637 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-catalog-content\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.296067 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-utilities\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.296140 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-catalog-content\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.296180 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kglj\" (UniqueName: \"kubernetes.io/projected/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-kube-api-access-4kglj\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.296968 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-utilities\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.297168 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-catalog-content\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.315146 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kglj\" (UniqueName: \"kubernetes.io/projected/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-kube-api-access-4kglj\") pod \"certified-operators-cpztv\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.327319 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpztv"
Jan 20 19:52:59 crc kubenswrapper[4948]: I0120 19:52:59.534634 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cpztv"]
Jan 20 19:52:59 crc kubenswrapper[4948]: W0120 19:52:59.544780 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5882349f_db20_4e02_80dd_5a7f6b4e5f0f.slice/crio-8102e813a574425559b34d88d5ca6854c2a309cd0936de1ec683b79d6b9ec942 WatchSource:0}: Error finding container 8102e813a574425559b34d88d5ca6854c2a309cd0936de1ec683b79d6b9ec942: Status 404 returned error can't find the container with id 8102e813a574425559b34d88d5ca6854c2a309cd0936de1ec683b79d6b9ec942
Jan 20 19:53:00 crc kubenswrapper[4948]: I0120 19:53:00.487715 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpztv" event={"ID":"5882349f-db20-4e02-80dd-5a7f6b4e5f0f","Type":"ContainerStarted","Data":"c786d7d5b53b61f7cddfe4913701f9aae7e84db4b5f21b40e779852c6453451d"}
Jan 20 19:53:00 crc kubenswrapper[4948]: I0120 19:53:00.487771 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpztv" event={"ID":"5882349f-db20-4e02-80dd-5a7f6b4e5f0f","Type":"ContainerStarted","Data":"8102e813a574425559b34d88d5ca6854c2a309cd0936de1ec683b79d6b9ec942"}
Jan 20 19:53:00 crc kubenswrapper[4948]: I0120 19:53:00.488800 4948 generic.go:334] "Generic (PLEG): container finished" podID="52223d24-be7c-4761-8f46-efcc30f37f8b" containerID="01639db36713f4b7a81ec4bf9e21f8d2939dfbca8859dc655cb55ac9fd3fe46e" exitCode=0
Jan 20 19:53:00 crc kubenswrapper[4948]: I0120 19:53:00.488824 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2jd7" event={"ID":"52223d24-be7c-4761-8f46-efcc30f37f8b","Type":"ContainerDied","Data":"01639db36713f4b7a81ec4bf9e21f8d2939dfbca8859dc655cb55ac9fd3fe46e"}
Jan 20 19:53:01 crc kubenswrapper[4948]: I0120 19:53:01.193384 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-qjm22"
Jan 20 19:53:01 crc kubenswrapper[4948]: I0120 19:53:01.252079 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-bwm86"]
Jan 20 19:53:01 crc kubenswrapper[4948]: I0120 19:53:01.497333 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpztv" event={"ID":"5882349f-db20-4e02-80dd-5a7f6b4e5f0f","Type":"ContainerDied","Data":"c786d7d5b53b61f7cddfe4913701f9aae7e84db4b5f21b40e779852c6453451d"}
Jan 20 19:53:01 crc kubenswrapper[4948]: I0120 19:53:01.497179 4948 generic.go:334] "Generic (PLEG): container finished" podID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerID="c786d7d5b53b61f7cddfe4913701f9aae7e84db4b5f21b40e779852c6453451d" exitCode=0
Jan 20 19:53:03 crc kubenswrapper[4948]: I0120 19:53:03.513920 4948 generic.go:334] "Generic (PLEG): container finished" podID="f8d1e5d7-2511-47ad-b240-677792863a32" containerID="7b89296f231ec10b4edf518a7fad65e4d462c41c9a8ac93fd9fc40a20e9cd346" exitCode=0
Jan 20 19:53:03 crc kubenswrapper[4948]: I0120 19:53:03.514038 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hsxfw" event={"ID":"f8d1e5d7-2511-47ad-b240-677792863a32","Type":"ContainerDied","Data":"7b89296f231ec10b4edf518a7fad65e4d462c41c9a8ac93fd9fc40a20e9cd346"}
Jan 20 19:53:03 crc kubenswrapper[4948]: I0120 19:53:03.517727 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kpqs5" event={"ID":"29572b48-7ca5-4e09-83d8-dcf2cc40682b","Type":"ContainerStarted","Data":"a1a91c75a73b53a95fbce1c7bfc6f45fa6c1308cad265d5d1884566ebb3d3590"}
Jan 20 19:53:04 crc kubenswrapper[4948]: I0120 19:53:04.560832 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hsxfw" event={"ID":"f8d1e5d7-2511-47ad-b240-677792863a32","Type":"ContainerStarted","Data":"1ea5f8a520c7fba854d611ab2a3a7ac5b9ddd27e56b19a62be137e7d796c8c86"}
Jan 20 19:53:04 crc kubenswrapper[4948]: I0120 19:53:04.565829 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2jd7" event={"ID":"52223d24-be7c-4761-8f46-efcc30f37f8b","Type":"ContainerStarted","Data":"9c9a375e472933b224c3a186e6b5bf435531116bcb199ca12bbeaf4244969067"}
Jan 20 19:53:04 crc kubenswrapper[4948]: I0120 19:53:04.567978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpztv" event={"ID":"5882349f-db20-4e02-80dd-5a7f6b4e5f0f","Type":"ContainerStarted","Data":"a0f2a35e63c95bb1c50f43243b1414fc76be85055ad06e4de510d28d847bbc71"}
Jan 20 19:53:04 crc kubenswrapper[4948]: I0120 19:53:04.584311 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hsxfw" podStartSLOduration=3.059374664 podStartE2EDuration="9.584285976s" podCreationTimestamp="2026-01-20 19:52:55 +0000 UTC" firstStartedPulling="2026-01-20 19:52:57.449599574 +0000 UTC m=+205.400324543" lastFinishedPulling="2026-01-20 19:53:03.974510886 +0000 UTC m=+211.925235855" observedRunningTime="2026-01-20 19:53:04.580457713 +0000 UTC m=+212.531182682" watchObservedRunningTime="2026-01-20 19:53:04.584285976 +0000 UTC m=+212.535010945"
Jan 20 19:53:04 crc kubenswrapper[4948]: I0120 19:53:04.973822 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h"]
Jan 20 19:53:04 crc kubenswrapper[4948]: I0120 19:53:04.974033 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" podUID="f8f09ba9-24f6-472e-8d51-9991c732386b" containerName="controller-manager" containerID="cri-o://f8ec1e4f4846fa5100309825dcadf9f0f2559220ca2987aef70803f39844768d" gracePeriod=30
Jan 20 19:53:04 crc kubenswrapper[4948]: I0120 19:53:04.981280 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl"]
Jan 20 19:53:04 crc kubenswrapper[4948]: I0120 19:53:04.981682 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" podUID="7c36b505-5b12-409d-a6cc-63c7ab827fec" containerName="route-controller-manager" containerID="cri-o://78733da8e436856ad89bc8e5fe0dc5db88ece6739df841ddd4e3c6fa7001a80b" gracePeriod=30
Jan 20 19:53:05 crc kubenswrapper[4948]: E0120 19:53:05.529418 4948 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52223d24_be7c_4761_8f46_efcc30f37f8b.slice/crio-9c9a375e472933b224c3a186e6b5bf435531116bcb199ca12bbeaf4244969067.scope\": RecentStats: unable to find data in memory cache]"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.585024 4948 generic.go:334] "Generic (PLEG): container finished" podID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerID="a0f2a35e63c95bb1c50f43243b1414fc76be85055ad06e4de510d28d847bbc71" exitCode=0
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.585078 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpztv" event={"ID":"5882349f-db20-4e02-80dd-5a7f6b4e5f0f","Type":"ContainerDied","Data":"a0f2a35e63c95bb1c50f43243b1414fc76be85055ad06e4de510d28d847bbc71"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.590890 4948 generic.go:334] "Generic (PLEG): container finished" podID="7c36b505-5b12-409d-a6cc-63c7ab827fec" containerID="78733da8e436856ad89bc8e5fe0dc5db88ece6739df841ddd4e3c6fa7001a80b" exitCode=0
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.590969 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" event={"ID":"7c36b505-5b12-409d-a6cc-63c7ab827fec","Type":"ContainerDied","Data":"78733da8e436856ad89bc8e5fe0dc5db88ece6739df841ddd4e3c6fa7001a80b"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.591042 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" event={"ID":"7c36b505-5b12-409d-a6cc-63c7ab827fec","Type":"ContainerDied","Data":"7b1d36fbf562b1ba797c43a4fa9814b3870cee3566e660914a180a0fe4d09e4a"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.591054 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b1d36fbf562b1ba797c43a4fa9814b3870cee3566e660914a180a0fe4d09e4a"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.592339 4948 generic.go:334] "Generic (PLEG): container finished" podID="29572b48-7ca5-4e09-83d8-dcf2cc40682b" containerID="a1a91c75a73b53a95fbce1c7bfc6f45fa6c1308cad265d5d1884566ebb3d3590" exitCode=0
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.592374 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kpqs5" event={"ID":"29572b48-7ca5-4e09-83d8-dcf2cc40682b","Type":"ContainerDied","Data":"a1a91c75a73b53a95fbce1c7bfc6f45fa6c1308cad265d5d1884566ebb3d3590"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.597398 4948 generic.go:334] "Generic (PLEG): container finished" podID="f8f09ba9-24f6-472e-8d51-9991c732386b" containerID="f8ec1e4f4846fa5100309825dcadf9f0f2559220ca2987aef70803f39844768d" exitCode=0
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.597455 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" event={"ID":"f8f09ba9-24f6-472e-8d51-9991c732386b","Type":"ContainerDied","Data":"f8ec1e4f4846fa5100309825dcadf9f0f2559220ca2987aef70803f39844768d"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.609126 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.611329 4948 generic.go:334] "Generic (PLEG): container finished" podID="52223d24-be7c-4761-8f46-efcc30f37f8b" containerID="9c9a375e472933b224c3a186e6b5bf435531116bcb199ca12bbeaf4244969067" exitCode=0
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.614066 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2jd7" event={"ID":"52223d24-be7c-4761-8f46-efcc30f37f8b","Type":"ContainerDied","Data":"9c9a375e472933b224c3a186e6b5bf435531116bcb199ca12bbeaf4244969067"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.695095 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-client-ca\") pod \"7c36b505-5b12-409d-a6cc-63c7ab827fec\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.695468 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4qzd\" (UniqueName: \"kubernetes.io/projected/7c36b505-5b12-409d-a6cc-63c7ab827fec-kube-api-access-g4qzd\") pod \"7c36b505-5b12-409d-a6cc-63c7ab827fec\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.695492 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c36b505-5b12-409d-a6cc-63c7ab827fec-serving-cert\") pod \"7c36b505-5b12-409d-a6cc-63c7ab827fec\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.695532 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-config\") pod \"7c36b505-5b12-409d-a6cc-63c7ab827fec\" (UID: \"7c36b505-5b12-409d-a6cc-63c7ab827fec\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.697335 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-config" (OuterVolumeSpecName: "config") pod "7c36b505-5b12-409d-a6cc-63c7ab827fec" (UID: "7c36b505-5b12-409d-a6cc-63c7ab827fec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.697445 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-client-ca" (OuterVolumeSpecName: "client-ca") pod "7c36b505-5b12-409d-a6cc-63c7ab827fec" (UID: "7c36b505-5b12-409d-a6cc-63c7ab827fec"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.703160 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c36b505-5b12-409d-a6cc-63c7ab827fec-kube-api-access-g4qzd" (OuterVolumeSpecName: "kube-api-access-g4qzd") pod "7c36b505-5b12-409d-a6cc-63c7ab827fec" (UID: "7c36b505-5b12-409d-a6cc-63c7ab827fec"). InnerVolumeSpecName "kube-api-access-g4qzd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.704407 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c36b505-5b12-409d-a6cc-63c7ab827fec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7c36b505-5b12-409d-a6cc-63c7ab827fec" (UID: "7c36b505-5b12-409d-a6cc-63c7ab827fec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.748966 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hsxfw"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.749160 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hsxfw"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.796207 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-client-ca\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.796249 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4qzd\" (UniqueName: \"kubernetes.io/projected/7c36b505-5b12-409d-a6cc-63c7ab827fec-kube-api-access-g4qzd\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.796265 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7c36b505-5b12-409d-a6cc-63c7ab827fec-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.796278 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c36b505-5b12-409d-a6cc-63c7ab827fec-config\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.822752 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.998119 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7lvb\" (UniqueName: \"kubernetes.io/projected/f8f09ba9-24f6-472e-8d51-9991c732386b-kube-api-access-f7lvb\") pod \"f8f09ba9-24f6-472e-8d51-9991c732386b\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.998232 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-proxy-ca-bundles\") pod \"f8f09ba9-24f6-472e-8d51-9991c732386b\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.998272 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8f09ba9-24f6-472e-8d51-9991c732386b-serving-cert\") pod \"f8f09ba9-24f6-472e-8d51-9991c732386b\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.998301 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-client-ca\") pod \"f8f09ba9-24f6-472e-8d51-9991c732386b\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.998323 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-config\") pod \"f8f09ba9-24f6-472e-8d51-9991c732386b\" (UID: \"f8f09ba9-24f6-472e-8d51-9991c732386b\") "
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.999488 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-config" (OuterVolumeSpecName: "config") pod "f8f09ba9-24f6-472e-8d51-9991c732386b" (UID: "f8f09ba9-24f6-472e-8d51-9991c732386b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:05.999576 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f8f09ba9-24f6-472e-8d51-9991c732386b" (UID: "f8f09ba9-24f6-472e-8d51-9991c732386b"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.000914 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-client-ca" (OuterVolumeSpecName: "client-ca") pod "f8f09ba9-24f6-472e-8d51-9991c732386b" (UID: "f8f09ba9-24f6-472e-8d51-9991c732386b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.003462 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8f09ba9-24f6-472e-8d51-9991c732386b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f8f09ba9-24f6-472e-8d51-9991c732386b" (UID: "f8f09ba9-24f6-472e-8d51-9991c732386b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.003867 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8f09ba9-24f6-472e-8d51-9991c732386b-kube-api-access-f7lvb" (OuterVolumeSpecName: "kube-api-access-f7lvb") pod "f8f09ba9-24f6-472e-8d51-9991c732386b" (UID: "f8f09ba9-24f6-472e-8d51-9991c732386b"). InnerVolumeSpecName "kube-api-access-f7lvb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.101153 4948 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8f09ba9-24f6-472e-8d51-9991c732386b-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.101198 4948 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-client-ca\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.101210 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-config\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.101227 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7lvb\" (UniqueName: \"kubernetes.io/projected/f8f09ba9-24f6-472e-8d51-9991c732386b-kube-api-access-f7lvb\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.101243 4948 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f8f09ba9-24f6-472e-8d51-9991c732386b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.228816 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8587f68d9-qkppd"]
Jan 20 19:53:06 crc kubenswrapper[4948]: E0120 19:53:06.229213 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8f09ba9-24f6-472e-8d51-9991c732386b" containerName="controller-manager"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.229234 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8f09ba9-24f6-472e-8d51-9991c732386b" containerName="controller-manager"
Jan 20 19:53:06 crc kubenswrapper[4948]: E0120 19:53:06.229270 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c36b505-5b12-409d-a6cc-63c7ab827fec" containerName="route-controller-manager"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.229279 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c36b505-5b12-409d-a6cc-63c7ab827fec" containerName="route-controller-manager"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.229392 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8f09ba9-24f6-472e-8d51-9991c732386b" containerName="controller-manager"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.229412 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c36b505-5b12-409d-a6cc-63c7ab827fec" containerName="route-controller-manager"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.229945 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.235264 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"]
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.236002 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.244377 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8587f68d9-qkppd"]
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.262289 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"]
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.406747 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2zmh\" (UniqueName: \"kubernetes.io/projected/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-kube-api-access-f2zmh\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.406809 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-client-ca\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.406839 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71724a94-719b-4373-bd0a-00a06c5864f9-config\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.406901 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dgbw\" (UniqueName: \"kubernetes.io/projected/71724a94-719b-4373-bd0a-00a06c5864f9-kube-api-access-4dgbw\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.406916 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-proxy-ca-bundles\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.406977 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-serving-cert\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.406997 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71724a94-719b-4373-bd0a-00a06c5864f9-serving-cert\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.407658 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-config\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.407722 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/71724a94-719b-4373-bd0a-00a06c5864f9-client-ca\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509278 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/71724a94-719b-4373-bd0a-00a06c5864f9-client-ca\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509334 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2zmh\" (UniqueName: \"kubernetes.io/projected/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-kube-api-access-f2zmh\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509360 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-client-ca\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509382 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dgbw\" (UniqueName: \"kubernetes.io/projected/71724a94-719b-4373-bd0a-00a06c5864f9-kube-api-access-4dgbw\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509399 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-proxy-ca-bundles\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509415 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71724a94-719b-4373-bd0a-00a06c5864f9-config\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509434 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-serving-cert\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509452 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71724a94-719b-4373-bd0a-00a06c5864f9-serving-cert\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.509485 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-config\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.510331 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/71724a94-719b-4373-bd0a-00a06c5864f9-client-ca\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.510726 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-config\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.511128 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-proxy-ca-bundles\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.511523 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/71724a94-719b-4373-bd0a-00a06c5864f9-config\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.511947 4948 patch_prober.go:28] interesting pod/route-controller-manager-5f65fb8948-hlfhl container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.512112 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" podUID="7c36b505-5b12-409d-a6cc-63c7ab827fec" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.63:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.518051 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/71724a94-719b-4373-bd0a-00a06c5864f9-serving-cert\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.529943 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-serving-cert\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.535037 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2zmh\" (UniqueName: \"kubernetes.io/projected/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-kube-api-access-f2zmh\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.536111 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dgbw\" (UniqueName: \"kubernetes.io/projected/71724a94-719b-4373-bd0a-00a06c5864f9-kube-api-access-4dgbw\") pod \"route-controller-manager-5454b957b9-fbc58\" (UID: \"71724a94-719b-4373-bd0a-00a06c5864f9\") " pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.584005 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c0fd9a37-336c-4c1a-b750-8eb8442f4baa-client-ca\") pod \"controller-manager-8587f68d9-qkppd\" (UID: \"c0fd9a37-336c-4c1a-b750-8eb8442f4baa\") " pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.618274 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.630853 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.633296 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h" event={"ID":"f8f09ba9-24f6-472e-8d51-9991c732386b","Type":"ContainerDied","Data":"dc4f903532d5044e99e79963bd4e44b20f99697a42b544372bddb4c5593d9c7a"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.633342 4948 scope.go:117] "RemoveContainer" containerID="f8ec1e4f4846fa5100309825dcadf9f0f2559220ca2987aef70803f39844768d"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.633473 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h"
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.645222 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h2jd7" event={"ID":"52223d24-be7c-4761-8f46-efcc30f37f8b","Type":"ContainerStarted","Data":"01f29f2859248bcd54e73986bf2b0c981a6110dbcd3888fd441fe4f9587e58c4"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.673623 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h"]
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.697538 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpztv" event={"ID":"5882349f-db20-4e02-80dd-5a7f6b4e5f0f","Type":"ContainerStarted","Data":"d5c55826673facc08a010914dca1e1855c9447cbc10b2b32f64e610171d93fca"}
Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.698437 4948 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl" Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.714546 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6c75f5bc9c-bkb4h"] Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.716189 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h2jd7" podStartSLOduration=6.622561928 podStartE2EDuration="9.716177385s" podCreationTimestamp="2026-01-20 19:52:57 +0000 UTC" firstStartedPulling="2026-01-20 19:53:03.043436562 +0000 UTC m=+210.994161531" lastFinishedPulling="2026-01-20 19:53:06.137052009 +0000 UTC m=+214.087776988" observedRunningTime="2026-01-20 19:53:06.715592308 +0000 UTC m=+214.666317277" watchObservedRunningTime="2026-01-20 19:53:06.716177385 +0000 UTC m=+214.666902354" Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.777289 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cpztv" podStartSLOduration=5.672989155 podStartE2EDuration="8.777269556s" podCreationTimestamp="2026-01-20 19:52:58 +0000 UTC" firstStartedPulling="2026-01-20 19:53:03.020675371 +0000 UTC m=+210.971400330" lastFinishedPulling="2026-01-20 19:53:06.124955762 +0000 UTC m=+214.075680731" observedRunningTime="2026-01-20 19:53:06.740255285 +0000 UTC m=+214.690980254" watchObservedRunningTime="2026-01-20 19:53:06.777269556 +0000 UTC m=+214.727994525" Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.781368 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl"] Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.783679 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5f65fb8948-hlfhl"] Jan 20 19:53:06 crc kubenswrapper[4948]: I0120 19:53:06.874873 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-hsxfw" podUID="f8d1e5d7-2511-47ad-b240-677792863a32" containerName="registry-server" probeResult="failure" output=< Jan 20 19:53:06 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 19:53:06 crc kubenswrapper[4948]: > Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.194978 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8587f68d9-qkppd"] Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.300822 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58"] Jan 20 19:53:07 crc kubenswrapper[4948]: W0120 19:53:07.319836 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71724a94_719b_4373_bd0a_00a06c5864f9.slice/crio-ca1f8b32ebd102da37d013b4d6d77fb725f18253994e3b0c9b35099d4b862d0a WatchSource:0}: Error finding container ca1f8b32ebd102da37d013b4d6d77fb725f18253994e3b0c9b35099d4b862d0a: Status 404 returned error can't find the container with id ca1f8b32ebd102da37d013b4d6d77fb725f18253994e3b0c9b35099d4b862d0a Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.706338 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kpqs5" 
event={"ID":"29572b48-7ca5-4e09-83d8-dcf2cc40682b","Type":"ContainerStarted","Data":"c64a8bdc117969fb75a0f4f26d3ff761004493a318dec8c2ac84eaf0d45d4d04"} Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.710582 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd" event={"ID":"c0fd9a37-336c-4c1a-b750-8eb8442f4baa","Type":"ContainerStarted","Data":"c729cc03cc740da51f2ec0dde4d0c7c9e4264d9ad912f8ca100ee92470a3c6df"} Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.710640 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd" event={"ID":"c0fd9a37-336c-4c1a-b750-8eb8442f4baa","Type":"ContainerStarted","Data":"6fbe772e7fbc5389f3d72e98f36e0ad9a1665e8f77dc8a8e82f1168ba0abf9d6"} Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.712220 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58" event={"ID":"71724a94-719b-4373-bd0a-00a06c5864f9","Type":"ContainerStarted","Data":"08d3dd929eacdb1c4e47317d365f16f546044b3abab74ee9bb770c8f7ba6fe87"} Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.712247 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58" event={"ID":"71724a94-719b-4373-bd0a-00a06c5864f9","Type":"ContainerStarted","Data":"ca1f8b32ebd102da37d013b4d6d77fb725f18253994e3b0c9b35099d4b862d0a"} Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.738329 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kpqs5" podStartSLOduration=4.169323687 podStartE2EDuration="11.738311924s" podCreationTimestamp="2026-01-20 19:52:56 +0000 UTC" firstStartedPulling="2026-01-20 19:52:59.485641068 +0000 UTC m=+207.436366037" lastFinishedPulling="2026-01-20 19:53:07.054629305 +0000 UTC m=+215.005354274" observedRunningTime="2026-01-20 19:53:07.736440538 +0000 UTC m=+215.687165507" watchObservedRunningTime="2026-01-20 19:53:07.738311924 +0000 UTC m=+215.689036893" Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.766965 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58" podStartSLOduration=2.766948438 podStartE2EDuration="2.766948438s" podCreationTimestamp="2026-01-20 19:53:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:53:07.76260832 +0000 UTC m=+215.713333289" watchObservedRunningTime="2026-01-20 19:53:07.766948438 +0000 UTC m=+215.717673407" Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.962824 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h2jd7" Jan 20 19:53:07 crc kubenswrapper[4948]: I0120 19:53:07.964536 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h2jd7" Jan 20 19:53:08 crc kubenswrapper[4948]: I0120 19:53:08.576834 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c36b505-5b12-409d-a6cc-63c7ab827fec" path="/var/lib/kubelet/pods/7c36b505-5b12-409d-a6cc-63c7ab827fec/volumes" Jan 20 19:53:08 crc kubenswrapper[4948]: I0120 19:53:08.578003 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="f8f09ba9-24f6-472e-8d51-9991c732386b" path="/var/lib/kubelet/pods/f8f09ba9-24f6-472e-8d51-9991c732386b/volumes" Jan 20 19:53:08 crc kubenswrapper[4948]: I0120 19:53:08.753814 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd" Jan 20 19:53:08 crc kubenswrapper[4948]: I0120 19:53:08.753858 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58" Jan 20 19:53:08 crc kubenswrapper[4948]: I0120 19:53:08.757697 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd" Jan 20 19:53:08 crc kubenswrapper[4948]: I0120 19:53:08.760010 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5454b957b9-fbc58" Jan 20 19:53:08 crc kubenswrapper[4948]: I0120 19:53:08.788849 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8587f68d9-qkppd" podStartSLOduration=3.788831489 podStartE2EDuration="3.788831489s" podCreationTimestamp="2026-01-20 19:53:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:53:07.848181813 +0000 UTC m=+215.798906782" watchObservedRunningTime="2026-01-20 19:53:08.788831489 +0000 UTC m=+216.739556458" Jan 20 19:53:09 crc kubenswrapper[4948]: I0120 19:53:09.024998 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-h2jd7" podUID="52223d24-be7c-4761-8f46-efcc30f37f8b" containerName="registry-server" probeResult="failure" output=< Jan 20 19:53:09 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 19:53:09 crc kubenswrapper[4948]: > Jan 20 19:53:09 crc kubenswrapper[4948]: I0120 19:53:09.328151 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cpztv" Jan 20 19:53:09 crc kubenswrapper[4948]: I0120 19:53:09.328248 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cpztv" Jan 20 19:53:10 crc kubenswrapper[4948]: I0120 19:53:10.362233 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-cpztv" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerName="registry-server" probeResult="failure" output=< Jan 20 19:53:10 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 19:53:10 crc kubenswrapper[4948]: > Jan 20 19:53:14 crc kubenswrapper[4948]: I0120 19:53:14.802666 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerName="oauth-openshift" containerID="cri-o://d16b9bf027baa151c3deefa2434cbe49f94c835bc3c58ab2f402ae916429a9b1" gracePeriod=15 Jan 20 19:53:16 crc kubenswrapper[4948]: I0120 19:53:16.132183 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:53:16 crc kubenswrapper[4948]: I0120 19:53:16.182221 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hsxfw" Jan 20 19:53:16 crc 
kubenswrapper[4948]: I0120 19:53:16.449025 4948 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-vxm8l container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Jan 20 19:53:16 crc kubenswrapper[4948]: I0120 19:53:16.449596 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Jan 20 19:53:16 crc kubenswrapper[4948]: I0120 19:53:16.794835 4948 generic.go:334] "Generic (PLEG): container finished" podID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerID="d16b9bf027baa151c3deefa2434cbe49f94c835bc3c58ab2f402ae916429a9b1" exitCode=0 Jan 20 19:53:16 crc kubenswrapper[4948]: I0120 19:53:16.794953 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" event={"ID":"65a093ae-de0d-4938-9fe8-ba43c4b3eef0","Type":"ContainerDied","Data":"d16b9bf027baa151c3deefa2434cbe49f94c835bc3c58ab2f402ae916429a9b1"} Jan 20 19:53:16 crc kubenswrapper[4948]: I0120 19:53:16.988922 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kpqs5" Jan 20 19:53:16 crc kubenswrapper[4948]: I0120 19:53:16.988975 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kpqs5" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.054253 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kpqs5" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.331128 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.414742 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-5b9d67559d-cg7qx"] Jan 20 19:53:17 crc kubenswrapper[4948]: E0120 19:53:17.415022 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerName="oauth-openshift" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.415043 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerName="oauth-openshift" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.415166 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" containerName="oauth-openshift" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.415611 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428460 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-trusted-ca-bundle\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428526 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-session\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428558 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-dir\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428581 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-service-ca\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428610 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-idp-0-file-data\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428643 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-cliconfig\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428664 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-serving-cert\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428683 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-ocp-branding-template\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428701 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx4pw\" (UniqueName: \"kubernetes.io/projected/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-kube-api-access-hx4pw\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc 
kubenswrapper[4948]: I0120 19:53:17.428771 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-error\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428798 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-router-certs\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428828 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-provider-selection\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428853 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-login\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.428877 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-policies\") pod \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\" (UID: \"65a093ae-de0d-4938-9fe8-ba43c4b3eef0\") " Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429048 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-router-certs\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429071 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429091 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429107 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-error\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429133 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429159 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/14d94857-8499-4e2a-b579-31472f6a964b-audit-dir\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429183 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-audit-policies\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429210 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429243 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429267 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-service-ca\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429287 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-session\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429305 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429321 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-login\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429340 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvn8j\" (UniqueName: \"kubernetes.io/projected/14d94857-8499-4e2a-b579-31472f6a964b-kube-api-access-xvn8j\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429371 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429634 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429684 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.429954 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.430437 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). 
InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.431307 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5b9d67559d-cg7qx"] Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.435198 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.438851 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.439085 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.442189 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.443893 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-kube-api-access-hx4pw" (OuterVolumeSpecName: "kube-api-access-hx4pw") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "kube-api-access-hx4pw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.450875 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.451057 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.452895 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.454038 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "65a093ae-de0d-4938-9fe8-ba43c4b3eef0" (UID: "65a093ae-de0d-4938-9fe8-ba43c4b3eef0"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.530756 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.530838 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/14d94857-8499-4e2a-b579-31472f6a964b-audit-dir\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.530866 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-audit-policies\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.530901 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.530935 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.530958 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-service-ca\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.530988 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-session\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531012 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531035 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-login\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531061 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvn8j\" (UniqueName: \"kubernetes.io/projected/14d94857-8499-4e2a-b579-31472f6a964b-kube-api-access-xvn8j\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531124 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-router-certs\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531147 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531174 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531208 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-error\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531264 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531281 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531295 4948 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531306 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531319 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531331 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531344 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531355 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx4pw\" (UniqueName: \"kubernetes.io/projected/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-kube-api-access-hx4pw\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531366 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531380 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531393 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531406 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531419 4948 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.531431 4948 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/65a093ae-de0d-4938-9fe8-ba43c4b3eef0-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.532628 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-audit-policies\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.532665 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.533033 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-service-ca\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.534035 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.534569 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/14d94857-8499-4e2a-b579-31472f6a964b-audit-dir\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.535331 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-session\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.535549 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.535957 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.536661 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-error\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.537530 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.538642 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-template-login\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.540527 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-system-router-certs\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.544180 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/14d94857-8499-4e2a-b579-31472f6a964b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.546844 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xvn8j\" (UniqueName: \"kubernetes.io/projected/14d94857-8499-4e2a-b579-31472f6a964b-kube-api-access-xvn8j\") pod \"oauth-openshift-5b9d67559d-cg7qx\" (UID: \"14d94857-8499-4e2a-b579-31472f6a964b\") " pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.736784 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.811899 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.816614 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-vxm8l" event={"ID":"65a093ae-de0d-4938-9fe8-ba43c4b3eef0","Type":"ContainerDied","Data":"d75d9c8131bcf2d382557aa61e598740ff2a71289e8d5c223ba41f5b6749d6e0"} Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.816675 4948 scope.go:117] "RemoveContainer" containerID="d16b9bf027baa151c3deefa2434cbe49f94c835bc3c58ab2f402ae916429a9b1" Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.849132 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vxm8l"] Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.852528 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-vxm8l"] Jan 20 19:53:17 crc kubenswrapper[4948]: I0120 19:53:17.880298 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kpqs5" Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.002821 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-h2jd7" Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.041107 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h2jd7" Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.168518 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-5b9d67559d-cg7qx"] Jan 20 19:53:18 crc kubenswrapper[4948]: W0120 19:53:18.176956 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14d94857_8499_4e2a_b579_31472f6a964b.slice/crio-3fee3b8e6bf5a9d369e9e88de71212ee24e967338933070364023fbfe69d76b1 WatchSource:0}: Error finding container 3fee3b8e6bf5a9d369e9e88de71212ee24e967338933070364023fbfe69d76b1: Status 404 returned error can't find the container with id 3fee3b8e6bf5a9d369e9e88de71212ee24e967338933070364023fbfe69d76b1 Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.578050 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65a093ae-de0d-4938-9fe8-ba43c4b3eef0" path="/var/lib/kubelet/pods/65a093ae-de0d-4938-9fe8-ba43c4b3eef0/volumes" Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.817666 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" event={"ID":"14d94857-8499-4e2a-b579-31472f6a964b","Type":"ContainerStarted","Data":"dfbc8477d93519b5419fdf4695c81755cac0888ebdaa33e93b51b221a53597b7"} Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.817742 4948 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" event={"ID":"14d94857-8499-4e2a-b579-31472f6a964b","Type":"ContainerStarted","Data":"3fee3b8e6bf5a9d369e9e88de71212ee24e967338933070364023fbfe69d76b1"} Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.817952 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.845016 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" podStartSLOduration=29.844998911 podStartE2EDuration="29.844998911s" podCreationTimestamp="2026-01-20 19:52:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:53:18.843867897 +0000 UTC m=+226.794592896" watchObservedRunningTime="2026-01-20 19:53:18.844998911 +0000 UTC m=+226.795723880" Jan 20 19:53:18 crc kubenswrapper[4948]: I0120 19:53:18.927460 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.156533 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-5b9d67559d-cg7qx" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263462 4948 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263515 4948 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.263751 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263771 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.263787 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263794 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.263806 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263811 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.263817 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263822 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.263834 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-cert-regeneration-controller" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263839 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.263849 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263855 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263944 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263953 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263962 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263973 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263978 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.263989 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.264076 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.264082 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.265199 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac" gracePeriod=15 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.265214 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d" gracePeriod=15 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.265241 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536" gracePeriod=15 Jan 20 19:53:19 crc 
kubenswrapper[4948]: I0120 19:53:19.265292 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf" gracePeriod=15 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.265313 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821" gracePeriod=15 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.266178 4948 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.267225 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.300681 4948 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.335920 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.362380 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.362604 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.362745 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.363028 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.363114 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.363208 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.363295 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.363418 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.407472 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cpztv" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.465309 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.465510 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.465632 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.465817 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.465934 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466032 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466117 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466215 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466499 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466809 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466844 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466864 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466882 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466900 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.466919 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.467117 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.486194 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cpztv" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.632360 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.657135 4948 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.180:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188c88774edf314b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-20 19:53:19.655498059 +0000 UTC m=+227.606223028,LastTimestamp:2026-01-20 19:53:19.655498059 +0000 UTC m=+227.606223028,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 20 19:53:19 crc kubenswrapper[4948]: E0120 19:53:19.676947 4948 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.180:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188c88774edf314b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-20 19:53:19.655498059 +0000 UTC m=+227.606223028,LastTimestamp:2026-01-20 19:53:19.655498059 +0000 UTC m=+227.606223028,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.826780 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"49830e069235227d0017d2905a0a4eee19501708a673853cf81be5409ac6540f"} Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.830002 4948 generic.go:334] "Generic (PLEG): container finished" podID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" containerID="bfffe0c60794c310b4c2fa84da3d2fdb0f4c958e2183fe5c6035ae2d8437e424" exitCode=0 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.830077 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"5bce8cba-e89c-4a8a-b261-ad8bae824ec9","Type":"ContainerDied","Data":"bfffe0c60794c310b4c2fa84da3d2fdb0f4c958e2183fe5c6035ae2d8437e424"} Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.834084 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.835278 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.836077 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821" exitCode=0 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.836097 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d" exitCode=0 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.836105 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536" exitCode=0 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.836113 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf" exitCode=2 Jan 20 19:53:19 crc kubenswrapper[4948]: I0120 19:53:19.836199 4948 scope.go:117] "RemoveContainer" containerID="095f1782ebbfe6705c839477b9a64f3ba3d5d374c1c1b3a7d4829e460bb2984d" Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.250392 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.250820 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.250874 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.251376 4948 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.251438 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185" gracePeriod=600 Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.843538 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185" exitCode=0 Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.843598 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185"} Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.843978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"615f93555b1b0a9ccd007e1b86dbe692ba729e13c19eaa173e866087cfea406b"} Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.848459 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 20 19:53:20 crc kubenswrapper[4948]: I0120 19:53:20.852463 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"d7a99c8c94dad8536c1e3d8e0cf88572f821c9483561a0294662b421e87667b4"} Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.208236 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.303140 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kube-api-access\") pod \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.303207 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kubelet-dir\") pod \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.303357 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-var-lock\") pod \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\" (UID: \"5bce8cba-e89c-4a8a-b261-ad8bae824ec9\") " Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.303369 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "5bce8cba-e89c-4a8a-b261-ad8bae824ec9" (UID: "5bce8cba-e89c-4a8a-b261-ad8bae824ec9"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.303454 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-var-lock" (OuterVolumeSpecName: "var-lock") pod "5bce8cba-e89c-4a8a-b261-ad8bae824ec9" (UID: "5bce8cba-e89c-4a8a-b261-ad8bae824ec9"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.303629 4948 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-var-lock\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.303650 4948 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.311251 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "5bce8cba-e89c-4a8a-b261-ad8bae824ec9" (UID: "5bce8cba-e89c-4a8a-b261-ad8bae824ec9"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.405309 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5bce8cba-e89c-4a8a-b261-ad8bae824ec9-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.820177 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.821020 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.862268 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.863088 4948 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac" exitCode=0 Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.863168 4948 scope.go:117] "RemoveContainer" containerID="ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.863214 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.866486 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.869413 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"5bce8cba-e89c-4a8a-b261-ad8bae824ec9","Type":"ContainerDied","Data":"12bd6f07ade0778d2aaa3876890f276cdb6f900419937f6dc4559097e1acd045"} Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.869473 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12bd6f07ade0778d2aaa3876890f276cdb6f900419937f6dc4559097e1acd045" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.878390 4948 scope.go:117] "RemoveContainer" containerID="b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.898803 4948 scope.go:117] "RemoveContainer" containerID="0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.911231 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.911325 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.911374 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.911675 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.911748 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.911720 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.911973 4948 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.911993 4948 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.914914 4948 scope.go:117] "RemoveContainer" containerID="2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.933498 4948 scope.go:117] "RemoveContainer" containerID="b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.961564 4948 scope.go:117] "RemoveContainer" containerID="2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.978977 4948 scope.go:117] "RemoveContainer" containerID="ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821" Jan 20 19:53:21 crc kubenswrapper[4948]: E0120 19:53:21.979606 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\": container with ID starting with ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821 not found: ID does not exist" containerID="ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.979657 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821"} err="failed to get container status \"ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\": rpc error: code = NotFound desc = could not find container 
\"ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821\": container with ID starting with ef3cfaeb079c884a0f7f8113af75b71d8274c379e42a33950e9a5775813bd821 not found: ID does not exist" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.979687 4948 scope.go:117] "RemoveContainer" containerID="b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d" Jan 20 19:53:21 crc kubenswrapper[4948]: E0120 19:53:21.980091 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\": container with ID starting with b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d not found: ID does not exist" containerID="b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.980209 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d"} err="failed to get container status \"b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\": rpc error: code = NotFound desc = could not find container \"b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d\": container with ID starting with b1c91ed982ac9e46ad54069e51b995a93552f8fe862f142e92ec92003e91a41d not found: ID does not exist" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.980289 4948 scope.go:117] "RemoveContainer" containerID="0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536" Jan 20 19:53:21 crc kubenswrapper[4948]: E0120 19:53:21.980623 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\": container with ID starting with 0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536 not found: ID does not exist" containerID="0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.980736 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536"} err="failed to get container status \"0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\": rpc error: code = NotFound desc = could not find container \"0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536\": container with ID starting with 0216fc60b0159c1095e0535cb32c93c92b6bf1b6b854dde2a82e1890206cf536 not found: ID does not exist" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.980817 4948 scope.go:117] "RemoveContainer" containerID="2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf" Jan 20 19:53:21 crc kubenswrapper[4948]: E0120 19:53:21.981087 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\": container with ID starting with 2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf not found: ID does not exist" containerID="2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.981165 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf"} 
err="failed to get container status \"2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\": rpc error: code = NotFound desc = could not find container \"2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf\": container with ID starting with 2631308930eb9c05a7c66ca4463ed0390bd9a7a934a58add4af410002b0892bf not found: ID does not exist" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.981238 4948 scope.go:117] "RemoveContainer" containerID="b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac" Jan 20 19:53:21 crc kubenswrapper[4948]: E0120 19:53:21.981495 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\": container with ID starting with b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac not found: ID does not exist" containerID="b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.981564 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac"} err="failed to get container status \"b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\": rpc error: code = NotFound desc = could not find container \"b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac\": container with ID starting with b7cfd4c0f9e0e9a5eb334c14db6b91927d4a543485d2ce1c30d54e61e5188eac not found: ID does not exist" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.981623 4948 scope.go:117] "RemoveContainer" containerID="2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740" Jan 20 19:53:21 crc kubenswrapper[4948]: E0120 19:53:21.981930 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\": container with ID starting with 2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740 not found: ID does not exist" containerID="2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740" Jan 20 19:53:21 crc kubenswrapper[4948]: I0120 19:53:21.981999 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740"} err="failed to get container status \"2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\": rpc error: code = NotFound desc = could not find container \"2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740\": container with ID starting with 2d9e7ce0304be4b2babc6ddbdb23d3ef16a466c0b545cbf4f482a9a7dd103740 not found: ID does not exist" Jan 20 19:53:22 crc kubenswrapper[4948]: I0120 19:53:22.015684 4948 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:22 crc kubenswrapper[4948]: I0120 19:53:22.576446 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.411378 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" 
pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.411615 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.414466 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.414618 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.414793 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.414931 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.415107 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.415279 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.415451 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:24 crc kubenswrapper[4948]: I0120 19:53:24.415625 4948 
status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.144983 4948 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.145608 4948 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.145891 4948 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.146146 4948 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.146405 4948 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.146436 4948 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.146666 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" interval="200ms" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.298850 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" containerName="registry" containerID="cri-o://6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2" gracePeriod=30 Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.347954 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" interval="400ms" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.736540 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.737775 4948 status_manager.go:851] "Failed to get status for pod" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-bwm86\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.738043 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.738257 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.738458 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.738662 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.749394 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" interval="800ms" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.782083 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-tls\") pod \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.782145 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-bound-sa-token\") pod \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.782213 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d9173bf0-5a37-423e-94e7-7496bd69f2ee-ca-trust-extracted\") pod \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " Jan 20 19:53:26 crc 
kubenswrapper[4948]: I0120 19:53:26.782384 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.782409 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-trusted-ca\") pod \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.782436 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d9173bf0-5a37-423e-94e7-7496bd69f2ee-installation-pull-secrets\") pod \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.782496 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-certificates\") pod \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.782529 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzk6g\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-kube-api-access-nzk6g\") pod \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\" (UID: \"d9173bf0-5a37-423e-94e7-7496bd69f2ee\") " Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.784286 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "d9173bf0-5a37-423e-94e7-7496bd69f2ee" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.784593 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "d9173bf0-5a37-423e-94e7-7496bd69f2ee" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.800342 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9173bf0-5a37-423e-94e7-7496bd69f2ee-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "d9173bf0-5a37-423e-94e7-7496bd69f2ee" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.801935 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "d9173bf0-5a37-423e-94e7-7496bd69f2ee" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.802040 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9173bf0-5a37-423e-94e7-7496bd69f2ee-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "d9173bf0-5a37-423e-94e7-7496bd69f2ee" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.802102 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-kube-api-access-nzk6g" (OuterVolumeSpecName: "kube-api-access-nzk6g") pod "d9173bf0-5a37-423e-94e7-7496bd69f2ee" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee"). InnerVolumeSpecName "kube-api-access-nzk6g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.802349 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "d9173bf0-5a37-423e-94e7-7496bd69f2ee" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.803451 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "d9173bf0-5a37-423e-94e7-7496bd69f2ee" (UID: "d9173bf0-5a37-423e-94e7-7496bd69f2ee"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.885113 4948 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d9173bf0-5a37-423e-94e7-7496bd69f2ee-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.885160 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.885174 4948 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d9173bf0-5a37-423e-94e7-7496bd69f2ee-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.885189 4948 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.885201 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzk6g\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-kube-api-access-nzk6g\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.885212 4948 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.885222 4948 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d9173bf0-5a37-423e-94e7-7496bd69f2ee-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.908988 4948 generic.go:334] "Generic (PLEG): container finished" podID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" containerID="6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2" exitCode=0 Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.909052 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" event={"ID":"d9173bf0-5a37-423e-94e7-7496bd69f2ee","Type":"ContainerDied","Data":"6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2"} Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.909086 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" event={"ID":"d9173bf0-5a37-423e-94e7-7496bd69f2ee","Type":"ContainerDied","Data":"0a3370b3da01f40da79f4717b7cec1b307052ec393d94db366758841905ec6c0"} Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.909118 4948 scope.go:117] "RemoveContainer" containerID="6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2" Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.909357 4948 util.go:48] "No ready sandbox for pod can be found. 
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.911041 4948 status_manager.go:851] "Failed to get status for pod" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-bwm86\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.911347 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.911622 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.918761 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.922666 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.925769 4948 status_manager.go:851] "Failed to get status for pod" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-bwm86\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.926074 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.926368 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.926430 4948 scope.go:117] "RemoveContainer" containerID="6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2"
Jan 20 19:53:26 crc kubenswrapper[4948]: E0120 19:53:26.927066 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2\": container with ID starting with 6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2 not found: ID does not exist" containerID="6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2"
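Every one of the status_manager failures above is the same underlying condition: nothing is accepting TCP connections on api-int.crc.testing:6443 while the apiserver restarts. A small stand-alone Go probe for that condition; the endpoint is taken from the log, and the syscall.ECONNREFUSED classification assumes a Unix host:

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"syscall"
	"time"
)

func main() {
	// Same host:port the kubelet is retrying against in the entries above.
	conn, err := net.DialTimeout("tcp", "api-int.crc.testing:6443", 2*time.Second)
	if err != nil {
		if errors.Is(err, syscall.ECONNREFUSED) {
			fmt.Println("connection refused: nothing listening on 6443 (apiserver down or restarting)")
		} else {
			fmt.Println("dial failed:", err)
		}
		return
	}
	conn.Close()
	fmt.Println("TCP connect succeeded; the apiserver port is reachable")
}
```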
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.927147 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2"} err="failed to get container status \"6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2\": rpc error: code = NotFound desc = could not find container \"6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2\": container with ID starting with 6e2a1589aad31fe06d948eb4733bcb50d62eca7a599333222f3628d17ee187d2 not found: ID does not exist"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.927277 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:26 crc kubenswrapper[4948]: I0120 19:53:26.927555 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:27 crc kubenswrapper[4948]: E0120 19:53:27.550562 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" interval="1.6s"
Jan 20 19:53:29 crc kubenswrapper[4948]: E0120 19:53:29.151912 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" interval="3.2s"
Jan 20 19:53:29 crc kubenswrapper[4948]: E0120 19:53:29.677691 4948 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.180:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188c88774edf314b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-20 19:53:19.655498059 +0000 UTC m=+227.606223028,LastTimestamp:2026-01-20 19:53:19.655498059 +0000 UTC m=+227.606223028,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
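The m=+227.606223028 suffixes in the event timestamps above are Go's monotonic-clock readings, which the time package prints alongside the wall clock whenever a time.Time still carries one. A two-line illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()
	fmt.Println(t)          // includes a suffix like "m=+0.000012345"
	fmt.Println(t.Round(0)) // Round(0) strips the monotonic reading
}
```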
Jan 20 19:53:32 crc kubenswrapper[4948]: E0120 19:53:32.353694 4948 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.180:6443: connect: connection refused" interval="6.4s"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.572335 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.573086 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.573458 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.573866 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.574284 4948 status_manager.go:851] "Failed to get status for pod" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-bwm86\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.574687 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.575012 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.575253 4948 status_manager.go:851] "Failed to get status for pod" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-bwm86\": dial tcp 38.102.83.180:6443: connect: connection refused"
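The lease controller's retry interval above doubles on each consecutive failure: 1.6s, then 3.2s, then 6.4s. A sketch of that doubling; the starting value is taken from the log, and any cap the real controller applies is not visible here:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// First retry interval reported in the log; doubles per failed attempt.
	interval := 1600 * time.Millisecond
	for attempt := 1; attempt <= 3; attempt++ {
		fmt.Printf("attempt %d failed, will retry in %s\n", attempt, interval)
		interval *= 2 // 1.6s -> 3.2s -> 6.4s
	}
}
```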
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.575447 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.575668 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.576792 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.586915 4948 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.586953 4948 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17"
Jan 20 19:53:32 crc kubenswrapper[4948]: E0120 19:53:32.587476 4948 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.588062 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 20 19:53:32 crc kubenswrapper[4948]: W0120 19:53:32.608665 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-b63540e8cde5b6e334d52e7ff3f670ffaffbed3a9f81b9e02b8769fbd126f8cd WatchSource:0}: Error finding container b63540e8cde5b6e334d52e7ff3f670ffaffbed3a9f81b9e02b8769fbd126f8cd: Status 404 returned error can't find the container with id b63540e8cde5b6e334d52e7ff3f670ffaffbed3a9f81b9e02b8769fbd126f8cd
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.953905 4948 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="6f19c14303693e92dcd6597bde2716bbe917d0bd0c3184ee5142f6c68e024fdc" exitCode=0
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.954063 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"6f19c14303693e92dcd6597bde2716bbe917d0bd0c3184ee5142f6c68e024fdc"}
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.954116 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b63540e8cde5b6e334d52e7ff3f670ffaffbed3a9f81b9e02b8769fbd126f8cd"}
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.954487 4948 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.954524 4948 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17"
Jan 20 19:53:32 crc kubenswrapper[4948]: E0120 19:53:32.955010 4948 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.180:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.955243 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.955777 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.956006 4948 status_manager.go:851] "Failed to get status for pod" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-bwm86\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.956367 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused"
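The pair of PLEG entries above (container 6f19... finished, sandbox b635... started) comes from the kubelet's relisting loop, which diffs the previous and current container states and emits ContainerDied/ContainerStarted events into the sync loop. A toy model of that diff, with illustrative types rather than kubelet's own:

```go
package main

import "fmt"

type event struct{ Type, ID string }

// relist compares old and new container states (IDs abbreviated from the log)
// and emits lifecycle events; containers missing from cur are ignored here.
func relist(old, cur map[string]string) []event {
	var evs []event
	for id, state := range cur {
		switch {
		case state == "running" && old[id] != "running":
			evs = append(evs, event{"ContainerStarted", id})
		case state == "exited" && old[id] == "running":
			evs = append(evs, event{"ContainerDied", id})
		}
	}
	return evs
}

func main() {
	old := map[string]string{"6f19c143": "running"}
	cur := map[string]string{"6f19c143": "exited", "b63540e8": "running"}
	for _, e := range relist(old, cur) {
		fmt.Printf("SyncLoop (PLEG): %s %s\n", e.Type, e.ID)
	}
}
```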
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.956663 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.958039 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.958097 4948 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf" exitCode=1
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.958128 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf"}
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.958467 4948 scope.go:117] "RemoveContainer" containerID="5da2f9d9b59d9840fef878bbaa5fc04ce4b14751db4e05d1709e831d703104cf"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.958725 4948 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.959013 4948 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.959264 4948 status_manager.go:851] "Failed to get status for pod" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" pod="openshift-image-registry/image-registry-697d97f7c8-bwm86" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/pods/image-registry-697d97f7c8-bwm86\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.959502 4948 status_manager.go:851] "Failed to get status for pod" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-xg4hv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.959762 4948 status_manager.go:851] "Failed to get status for pod" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" pod="openshift-marketplace/certified-operators-cpztv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-cpztv\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:32 crc kubenswrapper[4948]: I0120 19:53:32.960106 4948 status_manager.go:851] "Failed to get status for pod" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.180:6443: connect: connection refused"
Jan 20 19:53:33 crc kubenswrapper[4948]: I0120 19:53:33.974141 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4b1d3c3ec2be38e743d7e8eeccd1e558f081d04414bb9f5c3f770ad5e2edfe27"}
Jan 20 19:53:33 crc kubenswrapper[4948]: I0120 19:53:33.974498 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"82a05b40ea1e9fc8164c2a56e8c33b970b8c0bb06aa8a03d189136aa32a886b8"}
Jan 20 19:53:33 crc kubenswrapper[4948]: I0120 19:53:33.974515 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"34762061d6ae4b7bd90478725b6e715edf9e61e46bfe48cc531e6e35491e9c20"}
Jan 20 19:53:33 crc kubenswrapper[4948]: I0120 19:53:33.974526 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2eb3b35df82d5866db2f168dd66a4d52ad2c772dc02041dc3f938c6afcda04cc"}
Jan 20 19:53:33 crc kubenswrapper[4948]: I0120 19:53:33.983766 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 20 19:53:33 crc kubenswrapper[4948]: I0120 19:53:33.983835 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0e16abed8c377c70cd59c74fc4af470ac7d9aa46e096f28f2154702e0c7e3dcb"}
Jan 20 19:53:34 crc kubenswrapper[4948]: I0120 19:53:34.997089 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5d6165372b3c83b8cce41d52aac07e2c5d91f938a72f0d8237648e1b15987d6d"}
Jan 20 19:53:34 crc kubenswrapper[4948]: I0120 19:53:34.997299 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 20 19:53:34 crc kubenswrapper[4948]: I0120 19:53:34.997370 4948 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17"
Jan 20 19:53:34 crc kubenswrapper[4948]: I0120 19:53:34.997398 4948 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17"
Jan 20 19:53:36 crc kubenswrapper[4948]: I0120 19:53:36.332470 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 19:53:37 crc kubenswrapper[4948]: I0120 19:53:37.588918 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:37 crc kubenswrapper[4948]: I0120 19:53:37.589005 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:37 crc kubenswrapper[4948]: I0120 19:53:37.595340 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:38 crc kubenswrapper[4948]: I0120 19:53:38.561918 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 19:53:38 crc kubenswrapper[4948]: I0120 19:53:38.566229 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 19:53:40 crc kubenswrapper[4948]: I0120 19:53:40.016786 4948 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:40 crc kubenswrapper[4948]: I0120 19:53:40.065641 4948 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17" Jan 20 19:53:40 crc kubenswrapper[4948]: I0120 19:53:40.065678 4948 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17" Jan 20 19:53:40 crc kubenswrapper[4948]: I0120 19:53:40.075397 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:41 crc kubenswrapper[4948]: I0120 19:53:41.071046 4948 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17" Jan 20 19:53:41 crc kubenswrapper[4948]: I0120 19:53:41.071651 4948 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="b5e2c458-c544-45d1-ac7b-da99352dce17" Jan 20 19:53:42 crc kubenswrapper[4948]: I0120 19:53:42.589491 4948 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="047ad209-36c1-4166-83f6-5276a2d559ca" Jan 20 19:53:46 crc kubenswrapper[4948]: I0120 19:53:46.338388 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 20 19:53:50 crc kubenswrapper[4948]: I0120 19:53:50.092271 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 20 19:53:50 crc kubenswrapper[4948]: I0120 19:53:50.272989 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 20 19:53:50 crc kubenswrapper[4948]: I0120 19:53:50.502591 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 20 19:53:50 crc kubenswrapper[4948]: I0120 19:53:50.861414 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 20 
Jan 20 19:53:50 crc kubenswrapper[4948]: I0120 19:53:50.940138 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 20 19:53:51 crc kubenswrapper[4948]: I0120 19:53:51.082246 4948 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Jan 20 19:53:51 crc kubenswrapper[4948]: I0120 19:53:51.110233 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Jan 20 19:53:51 crc kubenswrapper[4948]: I0120 19:53:51.479074 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Jan 20 19:53:51 crc kubenswrapper[4948]: I0120 19:53:51.518057 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Jan 20 19:53:51 crc kubenswrapper[4948]: I0120 19:53:51.741012 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Jan 20 19:53:51 crc kubenswrapper[4948]: I0120 19:53:51.776210 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 20 19:53:51 crc kubenswrapper[4948]: I0120 19:53:51.935566 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 20 19:53:51 crc kubenswrapper[4948]: I0120 19:53:51.948579 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.080782 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.383303 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.387554 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.388988 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.410085 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.451461 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.510174 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.719627 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.720921 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.925750 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 20 19:53:52 crc kubenswrapper[4948]: I0120 19:53:52.940997 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.189848 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.238886 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.330917 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.378069 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.491077 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.501946 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.608657 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.651389 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.761273 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.813823 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.822509 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.845513 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.933575 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 20 19:53:53 crc kubenswrapper[4948]: I0120 19:53:53.933654 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.040638 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.095248 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.097429 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.097681 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.359307 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.365526 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.375138 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.571769 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.722306 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.769903 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 20 19:53:54 crc kubenswrapper[4948]: I0120 19:53:54.932516 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.027597 4948 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.071802 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.109002 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.259179 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.259721 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.289333 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.334471 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.368391 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.509533 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.523840 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.656331 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.671242 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.676103 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.808661 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.819786 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Jan 20 19:53:55 crc kubenswrapper[4948]: I0120 19:53:55.902087 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.272075 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.292783 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.317622 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.378748 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.382912 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.402906 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.407939 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.653761 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.662490 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.702824 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.734122 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.787132 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.834697 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.834773 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.842738 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.895691 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.950758 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.974662 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Jan 20 19:53:56 crc kubenswrapper[4948]: I0120 19:53:56.982021 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.026341 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.035448 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.164004 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.198729 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.305394 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.382424 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.434464 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.497885 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.655747 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.657830 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.745672 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.801079 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.831787 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.856268 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 20 19:53:57 crc kubenswrapper[4948]: I0120 19:53:57.909276 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.117024 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.133907 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.168149 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.244172 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.287393 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.358336 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.474381 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.481110 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.562853 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.571407 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.582785 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.611310 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.698815 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.776107 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.778787 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.785430 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.832144 4948 reflector.go:368] Caches 
Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.853592 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.918464 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.947271 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.975876 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 20 19:53:58 crc kubenswrapper[4948]: I0120 19:53:58.976569 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.123684 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.161490 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.165909 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.243324 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.255392 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.318346 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.338985 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.369665 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.376947 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.383319 4948 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.392795 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=40.392775161 podStartE2EDuration="40.392775161s" podCreationTimestamp="2026-01-20 19:53:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:53:40.031441074 +0000 UTC m=+247.982166053" watchObservedRunningTime="2026-01-20 19:53:59.392775161 +0000 UTC m=+267.343500120"
Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.394031 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-image-registry/image-registry-697d97f7c8-bwm86"]
source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-image-registry/image-registry-697d97f7c8-bwm86"] Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.394084 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.402355 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.419715 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=19.419677488 podStartE2EDuration="19.419677488s" podCreationTimestamp="2026-01-20 19:53:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:53:59.417246317 +0000 UTC m=+267.367971306" watchObservedRunningTime="2026-01-20 19:53:59.419677488 +0000 UTC m=+267.370402457" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.506678 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.563773 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.590732 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.617233 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.735214 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.761629 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.826340 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.954023 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.974568 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 20 19:53:59 crc kubenswrapper[4948]: I0120 19:53:59.999513 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.004831 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.010038 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.019795 4948 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.027641 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.173296 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.228605 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.232520 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.239576 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.242651 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.261798 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.344371 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.375787 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.445402 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.498429 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.551975 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.583322 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" path="/var/lib/kubelet/pods/d9173bf0-5a37-423e-94e7-7496bd69f2ee/volumes" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.603987 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.664431 4948 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.703398 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.769245 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.792273 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.811424 4948 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.823225 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.840841 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.882972 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.899764 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.901344 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.938627 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 20 19:54:00 crc kubenswrapper[4948]: I0120 19:54:00.939829 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.107362 4948 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.148684 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.307976 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.372484 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.427874 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.457555 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.532559 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.536188 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.551055 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.570029 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.621975 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.648481 
4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.661332 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.789816 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.799638 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.900757 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 20 19:54:01 crc kubenswrapper[4948]: I0120 19:54:01.935897 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.018656 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.034807 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.035901 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.176832 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.212825 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.361900 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.433871 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.444062 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.451067 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.490829 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.493676 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.495517 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.504078 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 
19:54:02.586048 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.616817 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.719343 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.740500 4948 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.740858 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://d7a99c8c94dad8536c1e3d8e0cf88572f821c9483561a0294662b421e87667b4" gracePeriod=5 Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.814134 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.841410 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.956445 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.959495 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 20 19:54:02 crc kubenswrapper[4948]: I0120 19:54:02.985360 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.017958 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.066873 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.133821 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.135900 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.149629 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.151521 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.204399 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.226378 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 
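The "Killing container with a grace period" entry above hands the startup-monitor container gracePeriod=5: the runtime delivers SIGTERM and escalates to SIGKILL if the process outlives the grace period. A conceptual Go sketch of that sequence against an ordinary Linux process (a stand-in, not the kubelet/CRI-O implementation):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60") // stand-in for the container's main process
	_ = cmd.Start()
	done := make(chan struct{})
	go func() { _ = cmd.Wait(); close(done) }()

	_ = cmd.Process.Signal(syscall.SIGTERM) // polite stop, like gracePeriod=5 above
	select {
	case <-done:
		fmt.Println("exited within the grace period")
	case <-time.After(5 * time.Second):
		_ = cmd.Process.Kill() // SIGKILL; runtimes then report exitCode=137 (128+9)
		<-done
		fmt.Println("killed after the grace period")
	}
}

The exitCode=137 logged for this same container a few entries below is exactly the SIGKILL outcome of this escalation.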
Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.295081 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.590368 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.783111 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.792097 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 20 19:54:03 crc kubenswrapper[4948]: I0120 19:54:03.999011 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.095886 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.260325 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.279194 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.297222 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.381340 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.504925 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.600195 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.728684 4948 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.747489 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.789886 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Jan 20 19:54:04 crc kubenswrapper[4948]: I0120 19:54:04.823730 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 20 19:54:05 crc kubenswrapper[4948]: I0120 19:54:05.098469 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 20 19:54:05 crc kubenswrapper[4948]: I0120 19:54:05.286510 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 20 19:54:05 crc kubenswrapper[4948]: I0120 19:54:05.453782 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 20 19:54:05 crc kubenswrapper[4948]: I0120 19:54:05.524395 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 20 19:54:05 crc kubenswrapper[4948]: I0120 19:54:05.536885 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Jan 20 19:54:05 crc kubenswrapper[4948]: I0120 19:54:05.676474 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Jan 20 19:54:05 crc kubenswrapper[4948]: I0120 19:54:05.857523 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Jan 20 19:54:05 crc kubenswrapper[4948]: I0120 19:54:05.899342 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Jan 20 19:54:06 crc kubenswrapper[4948]: I0120 19:54:06.069968 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 20 19:54:06 crc kubenswrapper[4948]: I0120 19:54:06.230249 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Jan 20 19:54:06 crc kubenswrapper[4948]: I0120 19:54:06.382374 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 20 19:54:06 crc kubenswrapper[4948]: I0120 19:54:06.384630 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Jan 20 19:54:06 crc kubenswrapper[4948]: I0120 19:54:06.448514 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Jan 20 19:54:07 crc kubenswrapper[4948]: I0120 19:54:07.025943 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Jan 20 19:54:07 crc kubenswrapper[4948]: I0120 19:54:07.066653 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Jan 20 19:54:07 crc kubenswrapper[4948]: I0120 19:54:07.196299 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 20 19:54:07 crc kubenswrapper[4948]: I0120 19:54:07.280611 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 20 19:54:07 crc kubenswrapper[4948]: I0120 19:54:07.922405 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.192513 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.224628 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.224675 4948 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="d7a99c8c94dad8536c1e3d8e0cf88572f821c9483561a0294662b421e87667b4" exitCode=137
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.327825 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421293 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421341 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421391 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421406 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421459 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421495 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421504 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421560 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421652 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421897 4948 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421914 4948 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421925 4948 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.421935 4948 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.433913 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.523562 4948 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.577752 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.578057 4948 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.587558 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.587592 4948 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="c379480c-57cb-4898-8b71-24636b967fa9" Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.590565 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 20 19:54:08 crc kubenswrapper[4948]: I0120 19:54:08.590606 4948 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="c379480c-57cb-4898-8b71-24636b967fa9" Jan 20 19:54:09 crc kubenswrapper[4948]: I0120 19:54:09.230277 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 20 19:54:09 crc kubenswrapper[4948]: I0120 19:54:09.230342 4948 scope.go:117] "RemoveContainer" containerID="d7a99c8c94dad8536c1e3d8e0cf88572f821c9483561a0294662b421e87667b4" Jan 20 19:54:09 crc kubenswrapper[4948]: I0120 19:54:09.230458 4948 util.go:48] "No ready sandbox for pod can be found. 
Jan 20 19:54:09 crc kubenswrapper[4948]: I0120 19:54:09.230458 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 20 19:54:25 crc kubenswrapper[4948]: I0120 19:54:25.336834 4948 generic.go:334] "Generic (PLEG): container finished" podID="7cf25c7d-e351-4a2e-8992-47542811fb1f" containerID="648d0751e6ca0869747efc4dab3723b1746735080e4a0ef47ce408aaa4545e5f" exitCode=0
Jan 20 19:54:25 crc kubenswrapper[4948]: I0120 19:54:25.336937 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" event={"ID":"7cf25c7d-e351-4a2e-8992-47542811fb1f","Type":"ContainerDied","Data":"648d0751e6ca0869747efc4dab3723b1746735080e4a0ef47ce408aaa4545e5f"}
Jan 20 19:54:25 crc kubenswrapper[4948]: I0120 19:54:25.339374 4948 scope.go:117] "RemoveContainer" containerID="648d0751e6ca0869747efc4dab3723b1746735080e4a0ef47ce408aaa4545e5f"
Jan 20 19:54:26 crc kubenswrapper[4948]: I0120 19:54:26.349912 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl" event={"ID":"7cf25c7d-e351-4a2e-8992-47542811fb1f","Type":"ContainerStarted","Data":"0548e3c7efa0a8a375e8f21221ca9731d096050013114a078e412b81a18c61e6"}
Jan 20 19:54:26 crc kubenswrapper[4948]: I0120 19:54:26.351067 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl"
Jan 20 19:54:26 crc kubenswrapper[4948]: I0120 19:54:26.353322 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-z8fwl"
Jan 20 19:54:32 crc kubenswrapper[4948]: I0120 19:54:32.397680 4948 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials
Jan 20 19:55:20 crc kubenswrapper[4948]: I0120 19:55:20.250248 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:55:20 crc kubenswrapper[4948]: I0120 19:55:20.250781 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:55:50 crc kubenswrapper[4948]: I0120 19:55:50.250667 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:55:50 crc kubenswrapper[4948]: I0120 19:55:50.251556 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:56:20 crc kubenswrapper[4948]: I0120 19:56:20.250565 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 19:56:20 crc kubenswrapper[4948]: I0120 19:56:20.252745 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 19:56:20 crc kubenswrapper[4948]: I0120 19:56:20.253013 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv"
Jan 20 19:56:20 crc kubenswrapper[4948]: I0120 19:56:20.255454 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"615f93555b1b0a9ccd007e1b86dbe692ba729e13c19eaa173e866087cfea406b"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 19:56:20 crc kubenswrapper[4948]: I0120 19:56:20.255598 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://615f93555b1b0a9ccd007e1b86dbe692ba729e13c19eaa173e866087cfea406b" gracePeriod=600
Jan 20 19:56:21 crc kubenswrapper[4948]: I0120 19:56:21.226434 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="615f93555b1b0a9ccd007e1b86dbe692ba729e13c19eaa173e866087cfea406b" exitCode=0
Jan 20 19:56:21 crc kubenswrapper[4948]: I0120 19:56:21.226517 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"615f93555b1b0a9ccd007e1b86dbe692ba729e13c19eaa173e866087cfea406b"}
Jan 20 19:56:21 crc kubenswrapper[4948]: I0120 19:56:21.226789 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"e049e149f0a0dc1b1b363bfb2d9bdbd795da8ca2d31406285050192b1751620d"}
Jan 20 19:56:21 crc kubenswrapper[4948]: I0120 19:56:21.226812 4948 scope.go:117] "RemoveContainer" containerID="e8cf33f80144d59bd734348101f570a3604e68bede5fdd1116b7015dd791d185"
Jan 20 19:57:40 crc kubenswrapper[4948]: I0120 19:57:40.474518 4948 scope.go:117] "RemoveContainer" containerID="ce353bdbe0534364d302c134c9172525fcb75e3a0a2a4555979ccf5aaffd67a7"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.114651 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-82hbd"]
Jan 20 19:58:13 crc kubenswrapper[4948]: E0120 19:58:13.116247 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" containerName="installer"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.116315 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" containerName="installer"
Jan 20 19:58:13 crc kubenswrapper[4948]: E0120 19:58:13.116373 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" containerName="registry"
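The prober entries above show the machine-config-daemon liveness check failing with "connection refused" on http://127.0.0.1:8798/health at 19:55:20, 19:55:50, and 19:56:20, after which the kubelet marks the container unhealthy and kills it for restart (gracePeriod=600). A minimal Go sketch of such an HTTP liveness loop; the 30s period and threshold of 3 are inferred from the timestamps here, not read from the pod spec:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs one HTTP liveness check; a connection error (such as
// "connect: connection refused" above) or a non-2xx status is a failure.
func probe(url string) bool {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode >= 200 && resp.StatusCode < 300
}

func main() {
	const period = 30 * time.Second // matches the spacing of the failures above
	failures := 0
	for failures < 3 { // three consecutive failures, per the log's timeline
		if probe("http://127.0.0.1:8798/health") {
			failures = 0
		} else {
			failures++
			fmt.Println("probe failed", failures, "time(s)")
		}
		time.Sleep(period)
	}
	fmt.Println("liveness exhausted; container would be killed and restarted")
}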
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.116460 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" containerName="registry"
Jan 20 19:58:13 crc kubenswrapper[4948]: E0120 19:58:13.116526 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.116578 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.116730 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bce8cba-e89c-4a8a-b261-ad8bae824ec9" containerName="installer"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.116806 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9173bf0-5a37-423e-94e7-7496bd69f2ee" containerName="registry"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.116873 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.117337 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-82hbd"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.137964 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lm6w\" (UniqueName: \"kubernetes.io/projected/1973fd2f-85c7-4fbb-92b0-0973744d9d00-kube-api-access-5lm6w\") pod \"cert-manager-cainjector-cf98fcc89-82hbd\" (UID: \"1973fd2f-85c7-4fbb-92b0-0973744d9d00\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-82hbd"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.141232 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.141316 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.141371 4948 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-cfwb2"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.163635 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-dt9ht"]
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.164636 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-dt9ht"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.175633 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-82hbd"]
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.179966 4948 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-nhxvx"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.193260 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fckz7"]
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.194149 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.199481 4948 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-5vbwk"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.210648 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-dt9ht"]
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.225560 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fckz7"]
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.244163 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lm6w\" (UniqueName: \"kubernetes.io/projected/1973fd2f-85c7-4fbb-92b0-0973744d9d00-kube-api-access-5lm6w\") pod \"cert-manager-cainjector-cf98fcc89-82hbd\" (UID: \"1973fd2f-85c7-4fbb-92b0-0973744d9d00\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-82hbd"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.311503 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lm6w\" (UniqueName: \"kubernetes.io/projected/1973fd2f-85c7-4fbb-92b0-0973744d9d00-kube-api-access-5lm6w\") pod \"cert-manager-cainjector-cf98fcc89-82hbd\" (UID: \"1973fd2f-85c7-4fbb-92b0-0973744d9d00\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-82hbd"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.345565 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvrh6\" (UniqueName: \"kubernetes.io/projected/0a4be8e0-f8af-4f0d-8230-37fd71e2cc81-kube-api-access-fvrh6\") pod \"cert-manager-858654f9db-dt9ht\" (UID: \"0a4be8e0-f8af-4f0d-8230-37fd71e2cc81\") " pod="cert-manager/cert-manager-858654f9db-dt9ht"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.345684 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7thln\" (UniqueName: \"kubernetes.io/projected/5474f4e5-fa0d-4931-b732-4a1d0e06c858-kube-api-access-7thln\") pod \"cert-manager-webhook-687f57d79b-fckz7\" (UID: \"5474f4e5-fa0d-4931-b732-4a1d0e06c858\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.446986 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvrh6\" (UniqueName: \"kubernetes.io/projected/0a4be8e0-f8af-4f0d-8230-37fd71e2cc81-kube-api-access-fvrh6\") pod \"cert-manager-858654f9db-dt9ht\" (UID: \"0a4be8e0-f8af-4f0d-8230-37fd71e2cc81\") " pod="cert-manager/cert-manager-858654f9db-dt9ht"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.447360 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7thln\" (UniqueName: \"kubernetes.io/projected/5474f4e5-fa0d-4931-b732-4a1d0e06c858-kube-api-access-7thln\") pod \"cert-manager-webhook-687f57d79b-fckz7\" (UID: \"5474f4e5-fa0d-4931-b732-4a1d0e06c858\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.468670 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7thln\" (UniqueName: \"kubernetes.io/projected/5474f4e5-fa0d-4931-b732-4a1d0e06c858-kube-api-access-7thln\") pod \"cert-manager-webhook-687f57d79b-fckz7\" (UID: \"5474f4e5-fa0d-4931-b732-4a1d0e06c858\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.469318 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvrh6\" (UniqueName: \"kubernetes.io/projected/0a4be8e0-f8af-4f0d-8230-37fd71e2cc81-kube-api-access-fvrh6\") pod \"cert-manager-858654f9db-dt9ht\" (UID: \"0a4be8e0-f8af-4f0d-8230-37fd71e2cc81\") " pod="cert-manager/cert-manager-858654f9db-dt9ht"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.474525 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-82hbd"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.485479 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-dt9ht"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.550766 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7"
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.780653 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-82hbd"]
Jan 20 19:58:13 crc kubenswrapper[4948]: W0120 19:58:13.790994 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1973fd2f_85c7_4fbb_92b0_0973744d9d00.slice/crio-5ee791d136a7ca930d6af4ed8b1f1912424153b1b77c0f6a4f999a688ed7346c WatchSource:0}: Error finding container 5ee791d136a7ca930d6af4ed8b1f1912424153b1b77c0f6a4f999a688ed7346c: Status 404 returned error can't find the container with id 5ee791d136a7ca930d6af4ed8b1f1912424153b1b77c0f6a4f999a688ed7346c
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.793018 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.821060 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-dt9ht"]
Jan 20 19:58:13 crc kubenswrapper[4948]: W0120 19:58:13.828334 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a4be8e0_f8af_4f0d_8230_37fd71e2cc81.slice/crio-4d9601cb8dad0ba1f352763ec985644889b0439b945703512fb09de34415053c WatchSource:0}: Error finding container 4d9601cb8dad0ba1f352763ec985644889b0439b945703512fb09de34415053c: Status 404 returned error can't find the container with id 4d9601cb8dad0ba1f352763ec985644889b0439b945703512fb09de34415053c
Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.866487 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fckz7"]
Jan 20 19:58:13 crc kubenswrapper[4948]: W0120 19:58:13.877895 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5474f4e5_fa0d_4931_b732_4a1d0e06c858.slice/crio-789c1748ee822d855beeb427c3472d53f3b2b9548115c94a5661eeb5985685c1 WatchSource:0}: Error finding container 789c1748ee822d855beeb427c3472d53f3b2b9548115c94a5661eeb5985685c1: Status 404 returned error can't find the container with id 789c1748ee822d855beeb427c3472d53f3b2b9548115c94a5661eeb5985685c1
event={"ID":"0a4be8e0-f8af-4f0d-8230-37fd71e2cc81","Type":"ContainerStarted","Data":"4d9601cb8dad0ba1f352763ec985644889b0439b945703512fb09de34415053c"} Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.902931 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7" event={"ID":"5474f4e5-fa0d-4931-b732-4a1d0e06c858","Type":"ContainerStarted","Data":"789c1748ee822d855beeb427c3472d53f3b2b9548115c94a5661eeb5985685c1"} Jan 20 19:58:13 crc kubenswrapper[4948]: I0120 19:58:13.904244 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-82hbd" event={"ID":"1973fd2f-85c7-4fbb-92b0-0973744d9d00","Type":"ContainerStarted","Data":"5ee791d136a7ca930d6af4ed8b1f1912424153b1b77c0f6a4f999a688ed7346c"} Jan 20 19:58:18 crc kubenswrapper[4948]: I0120 19:58:18.942665 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-82hbd" event={"ID":"1973fd2f-85c7-4fbb-92b0-0973744d9d00","Type":"ContainerStarted","Data":"56227e8ec7e60fe5b2cd1d5cd86988a52351877e1d04534e3ded7b4d35906e5b"} Jan 20 19:58:18 crc kubenswrapper[4948]: I0120 19:58:18.944441 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-dt9ht" event={"ID":"0a4be8e0-f8af-4f0d-8230-37fd71e2cc81","Type":"ContainerStarted","Data":"38905adfee17f80b96831c8fe747a43bf214c67f8594ccef14affed2262cc26d"} Jan 20 19:58:18 crc kubenswrapper[4948]: I0120 19:58:18.945962 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7" event={"ID":"5474f4e5-fa0d-4931-b732-4a1d0e06c858","Type":"ContainerStarted","Data":"ce418ffede57a22894552a8232ec41eb24724891568b771d1a023a71d1bab309"} Jan 20 19:58:18 crc kubenswrapper[4948]: I0120 19:58:18.946631 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7" Jan 20 19:58:18 crc kubenswrapper[4948]: I0120 19:58:18.995120 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-82hbd" podStartSLOduration=1.226997181 podStartE2EDuration="5.99510485s" podCreationTimestamp="2026-01-20 19:58:13 +0000 UTC" firstStartedPulling="2026-01-20 19:58:13.792701785 +0000 UTC m=+521.743426754" lastFinishedPulling="2026-01-20 19:58:18.560809454 +0000 UTC m=+526.511534423" observedRunningTime="2026-01-20 19:58:18.992474806 +0000 UTC m=+526.943199775" watchObservedRunningTime="2026-01-20 19:58:18.99510485 +0000 UTC m=+526.945829819" Jan 20 19:58:19 crc kubenswrapper[4948]: I0120 19:58:19.040499 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7" podStartSLOduration=1.313113475 podStartE2EDuration="6.040478772s" podCreationTimestamp="2026-01-20 19:58:13 +0000 UTC" firstStartedPulling="2026-01-20 19:58:13.880353132 +0000 UTC m=+521.831078101" lastFinishedPulling="2026-01-20 19:58:18.607718429 +0000 UTC m=+526.558443398" observedRunningTime="2026-01-20 19:58:19.019508804 +0000 UTC m=+526.970233773" watchObservedRunningTime="2026-01-20 19:58:19.040478772 +0000 UTC m=+526.991203741" Jan 20 19:58:19 crc kubenswrapper[4948]: I0120 19:58:19.042513 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-dt9ht" podStartSLOduration=1.272400714 podStartE2EDuration="6.042507589s" podCreationTimestamp="2026-01-20 19:58:13 +0000 UTC" 
firstStartedPulling="2026-01-20 19:58:13.830011981 +0000 UTC m=+521.780736950" lastFinishedPulling="2026-01-20 19:58:18.600118856 +0000 UTC m=+526.550843825" observedRunningTime="2026-01-20 19:58:19.039280149 +0000 UTC m=+526.990005118" watchObservedRunningTime="2026-01-20 19:58:19.042507589 +0000 UTC m=+526.993232558" Jan 20 19:58:20 crc kubenswrapper[4948]: I0120 19:58:20.250546 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:58:20 crc kubenswrapper[4948]: I0120 19:58:20.251093 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.797607 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rtkhq"] Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.798057 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="nbdb" containerID="cri-o://2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82" gracePeriod=30 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.798163 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="sbdb" containerID="cri-o://d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737" gracePeriod=30 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.798285 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a" gracePeriod=30 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.798408 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="northd" containerID="cri-o://93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d" gracePeriod=30 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.798465 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kube-rbac-proxy-node" containerID="cri-o://9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7" gracePeriod=30 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.798519 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovn-acl-logging" containerID="cri-o://67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f" gracePeriod=30 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.798026 4948 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovn-controller" containerID="cri-o://74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0" gracePeriod=30 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.846597 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" containerID="cri-o://7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331" gracePeriod=30 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.972345 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/1.log" Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.972748 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/0.log" Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.972780 4948 generic.go:334] "Generic (PLEG): container finished" podID="e21ac8a2-1e79-4191-b809-75085d432b31" containerID="b41d2a53810cfb4c072af0d88429759b11509193add1fb0f10d77de4d747b8b4" exitCode=2 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.972827 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qttfm" event={"ID":"e21ac8a2-1e79-4191-b809-75085d432b31","Type":"ContainerDied","Data":"b41d2a53810cfb4c072af0d88429759b11509193add1fb0f10d77de4d747b8b4"} Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.972858 4948 scope.go:117] "RemoveContainer" containerID="9aeda225c938c45a07e57097c3149acf1cd6e7e713ad3e9311352714f6af3f36" Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.973282 4948 scope.go:117] "RemoveContainer" containerID="b41d2a53810cfb4c072af0d88429759b11509193add1fb0f10d77de4d747b8b4" Jan 20 19:58:22 crc kubenswrapper[4948]: E0120 19:58:22.973481 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-qttfm_openshift-multus(e21ac8a2-1e79-4191-b809-75085d432b31)\"" pod="openshift-multus/multus-qttfm" podUID="e21ac8a2-1e79-4191-b809-75085d432b31" Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.980512 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/2.log" Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.984177 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovn-acl-logging/0.log" Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.984815 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovn-controller/0.log" Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.986630 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a" exitCode=0 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.986683 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" 
containerID="9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7" exitCode=0 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.986694 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f" exitCode=143 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.986717 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0" exitCode=143 Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.986741 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a"} Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.986773 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7"} Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.986785 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f"} Jan 20 19:58:22 crc kubenswrapper[4948]: I0120 19:58:22.986796 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0"} Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.146926 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/2.log" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.149258 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovn-acl-logging/0.log" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.149655 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovn-controller/0.log" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.150067 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.204914 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-5f676"] Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205240 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205268 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205281 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kube-rbac-proxy-node" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205291 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kube-rbac-proxy-node" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205309 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205318 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205328 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="nbdb" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205336 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="nbdb" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205351 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kube-rbac-proxy-ovn-metrics" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205361 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kube-rbac-proxy-ovn-metrics" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205382 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovn-acl-logging" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205391 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovn-acl-logging" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205406 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kubecfg-setup" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205414 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kubecfg-setup" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205424 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205433 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205444 4948 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="northd" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205453 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="northd" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205465 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovn-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205473 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovn-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205483 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="sbdb" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205491 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="sbdb" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205631 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="northd" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205646 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205655 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kube-rbac-proxy-ovn-metrics" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205667 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="kube-rbac-proxy-node" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205678 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205688 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205764 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="sbdb" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205777 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovn-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205790 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="nbdb" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205801 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovn-acl-logging" Jan 20 19:58:23 crc kubenswrapper[4948]: E0120 19:58:23.205931 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.205941 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.206071 4948 
memory_manager.go:354] "RemoveStaleState removing state" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerName="ovnkube-controller" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.208344 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344659 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-env-overrides\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344733 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-openvswitch\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344774 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-var-lib-openvswitch\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344825 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-systemd-units\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344876 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344921 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55f6g\" (UniqueName: \"kubernetes.io/projected/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-kube-api-access-55f6g\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344944 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344954 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.344996 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-etc-openvswitch\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345025 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-var-lib-cni-networks-ovn-kubernetes\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345053 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-node-log\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345074 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345088 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-bin\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345103 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345126 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-ovn-kubernetes\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345150 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-netns\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345131 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-node-log" (OuterVolumeSpecName: "node-log") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). 
InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345145 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345163 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345170 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-netd\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345193 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345215 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-slash\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345231 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345247 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovn-node-metrics-cert\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345257 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-slash" (OuterVolumeSpecName: "host-slash") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345261 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-log-socket\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345254 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345300 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-kubelet\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345340 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-config\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345276 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-log-socket" (OuterVolumeSpecName: "log-socket") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345316 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345359 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-systemd\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345413 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-script-lib\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345436 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-ovn\") pod \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\" (UID: \"b00db8b2-f5fb-476f-bfc1-95c125fdaaac\") " Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345621 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-kubelet\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345659 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345665 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-var-lib-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345695 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345748 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovn-node-metrics-cert\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345787 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovnkube-script-lib\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345897 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-cni-netd\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345925 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-etc-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345938 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345948 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-run-netns\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345969 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-slash\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.345984 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346054 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-systemd\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346126 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-ovn\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346176 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-run-ovn-kubernetes\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346208 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346241 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-cni-bin\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346262 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-node-log\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346301 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-log-socket\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346373 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-systemd-units\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346405 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhdn9\" (UniqueName: \"kubernetes.io/projected/4ed29cf1-d076-41a3-8ad1-438db91ad979-kube-api-access-bhdn9\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346419 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-env-overrides\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346443 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovnkube-config\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346489 4948 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346500 4948 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346509 4948 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-node-log\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346519 4948 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346530 4948 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346540 4948 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346551 4948 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346561 4948 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-slash\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346571 4948 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-log-socket\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346582 4948 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346591 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346601 4948 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346611 4948 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346620 4948 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346628 4948 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346636 4948 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.346644 4948 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.350008 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-kube-api-access-55f6g" 
(OuterVolumeSpecName: "kube-api-access-55f6g") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "kube-api-access-55f6g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.350645 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.363859 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "b00db8b2-f5fb-476f-bfc1-95c125fdaaac" (UID: "b00db8b2-f5fb-476f-bfc1-95c125fdaaac"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447268 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-run-ovn-kubernetes\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447330 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447353 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-cni-bin\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447372 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-node-log\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447413 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-log-socket\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447420 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-run-ovn-kubernetes\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447442 
4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447489 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-cni-bin\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447489 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-node-log\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447453 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-systemd-units\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447498 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-systemd-units\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447499 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-log-socket\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447693 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-env-overrides\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.447806 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhdn9\" (UniqueName: \"kubernetes.io/projected/4ed29cf1-d076-41a3-8ad1-438db91ad979-kube-api-access-bhdn9\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.448308 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-env-overrides\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.448425 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovnkube-config\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.449201 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovnkube-config\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.449330 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-kubelet\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.449416 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-kubelet\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.449483 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-var-lib-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.449568 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-var-lib-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.449643 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.449753 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.449840 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovn-node-metrics-cert\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.450788 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovnkube-script-lib\") pod 
\"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.450885 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-cni-netd\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.450965 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-etc-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.450984 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-cni-netd\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451039 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-run-netns\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451102 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-etc-openvswitch\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451076 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-slash\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451138 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-run-netns\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451237 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-host-slash\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451261 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-systemd\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc 
kubenswrapper[4948]: I0120 19:58:23.451343 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-systemd\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451361 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-ovn\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451493 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4ed29cf1-d076-41a3-8ad1-438db91ad979-run-ovn\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451734 4948 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451756 4948 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451772 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55f6g\" (UniqueName: \"kubernetes.io/projected/b00db8b2-f5fb-476f-bfc1-95c125fdaaac-kube-api-access-55f6g\") on node \"crc\" DevicePath \"\"" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.451837 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovnkube-script-lib\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.455896 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4ed29cf1-d076-41a3-8ad1-438db91ad979-ovn-node-metrics-cert\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.466403 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhdn9\" (UniqueName: \"kubernetes.io/projected/4ed29cf1-d076-41a3-8ad1-438db91ad979-kube-api-access-bhdn9\") pod \"ovnkube-node-5f676\" (UID: \"4ed29cf1-d076-41a3-8ad1-438db91ad979\") " pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.521788 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:23 crc kubenswrapper[4948]: W0120 19:58:23.542931 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ed29cf1_d076_41a3_8ad1_438db91ad979.slice/crio-3f5ba6733ef888aba1014dce65d1e9454f474e60799d933a764c040db6ca9026 WatchSource:0}: Error finding container 3f5ba6733ef888aba1014dce65d1e9454f474e60799d933a764c040db6ca9026: Status 404 returned error can't find the container with id 3f5ba6733ef888aba1014dce65d1e9454f474e60799d933a764c040db6ca9026 Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.555110 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-fckz7" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.993455 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovnkube-controller/2.log" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.997869 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovn-acl-logging/0.log" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.998356 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rtkhq_b00db8b2-f5fb-476f-bfc1-95c125fdaaac/ovn-controller/0.log" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.998818 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331" exitCode=0 Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.998851 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737" exitCode=0 Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.998865 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82" exitCode=0 Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.998874 4948 generic.go:334] "Generic (PLEG): container finished" podID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" containerID="93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d" exitCode=0 Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.998940 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331"} Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.998979 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737"} Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.998995 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82"} Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.999010 4948 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d"} Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.999023 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" event={"ID":"b00db8b2-f5fb-476f-bfc1-95c125fdaaac","Type":"ContainerDied","Data":"5d37dbd9945b60a07b3620d4062a5cdd679c3caf924483de9be86f15dbe3b8a8"} Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.999047 4948 scope.go:117] "RemoveContainer" containerID="7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331" Jan 20 19:58:23 crc kubenswrapper[4948]: I0120 19:58:23.999040 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rtkhq" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.000808 4948 generic.go:334] "Generic (PLEG): container finished" podID="4ed29cf1-d076-41a3-8ad1-438db91ad979" containerID="2012b2a3c9a6e3d652a0ba38985b1990e013c2ba2c2c31ed5bff6f285794504b" exitCode=0 Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.000869 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerDied","Data":"2012b2a3c9a6e3d652a0ba38985b1990e013c2ba2c2c31ed5bff6f285794504b"} Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.000886 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"3f5ba6733ef888aba1014dce65d1e9454f474e60799d933a764c040db6ca9026"} Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.002950 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/1.log" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.025637 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.078235 4948 scope.go:117] "RemoveContainer" containerID="d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.078552 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rtkhq"] Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.082349 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rtkhq"] Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.100821 4948 scope.go:117] "RemoveContainer" containerID="2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.116749 4948 scope.go:117] "RemoveContainer" containerID="93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.130964 4948 scope.go:117] "RemoveContainer" containerID="11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.145053 4948 scope.go:117] "RemoveContainer" containerID="9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.175220 4948 scope.go:117] 
"RemoveContainer" containerID="67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.216369 4948 scope.go:117] "RemoveContainer" containerID="74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.274927 4948 scope.go:117] "RemoveContainer" containerID="ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.298459 4948 scope.go:117] "RemoveContainer" containerID="7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.299011 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": container with ID starting with 7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331 not found: ID does not exist" containerID="7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.299044 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331"} err="failed to get container status \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": rpc error: code = NotFound desc = could not find container \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": container with ID starting with 7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.299079 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.299449 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": container with ID starting with a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4 not found: ID does not exist" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.299469 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4"} err="failed to get container status \"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": rpc error: code = NotFound desc = could not find container \"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": container with ID starting with a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.299498 4948 scope.go:117] "RemoveContainer" containerID="d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.299978 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": container with ID starting with d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737 not found: ID does not exist" 
containerID="d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.300014 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737"} err="failed to get container status \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": rpc error: code = NotFound desc = could not find container \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": container with ID starting with d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.300041 4948 scope.go:117] "RemoveContainer" containerID="2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.300315 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": container with ID starting with 2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82 not found: ID does not exist" containerID="2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.300357 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82"} err="failed to get container status \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": rpc error: code = NotFound desc = could not find container \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": container with ID starting with 2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.300373 4948 scope.go:117] "RemoveContainer" containerID="93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.300680 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": container with ID starting with 93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d not found: ID does not exist" containerID="93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.300700 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d"} err="failed to get container status \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": rpc error: code = NotFound desc = could not find container \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": container with ID starting with 93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.300733 4948 scope.go:117] "RemoveContainer" containerID="11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.302953 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": container with ID starting with 11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a not found: ID does not exist" containerID="11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.302979 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a"} err="failed to get container status \"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": rpc error: code = NotFound desc = could not find container \"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": container with ID starting with 11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.302997 4948 scope.go:117] "RemoveContainer" containerID="9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.303364 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": container with ID starting with 9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7 not found: ID does not exist" containerID="9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.303409 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7"} err="failed to get container status \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": rpc error: code = NotFound desc = could not find container \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": container with ID starting with 9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.303430 4948 scope.go:117] "RemoveContainer" containerID="67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.303746 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": container with ID starting with 67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f not found: ID does not exist" containerID="67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.303765 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f"} err="failed to get container status \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": rpc error: code = NotFound desc = could not find container \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": container with ID starting with 67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.303780 4948 scope.go:117] "RemoveContainer" containerID="74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0" Jan 20 19:58:24 crc 
kubenswrapper[4948]: E0120 19:58:24.304064 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": container with ID starting with 74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0 not found: ID does not exist" containerID="74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.304083 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0"} err="failed to get container status \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": rpc error: code = NotFound desc = could not find container \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": container with ID starting with 74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.304123 4948 scope.go:117] "RemoveContainer" containerID="ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b" Jan 20 19:58:24 crc kubenswrapper[4948]: E0120 19:58:24.304466 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": container with ID starting with ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b not found: ID does not exist" containerID="ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.304525 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b"} err="failed to get container status \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": rpc error: code = NotFound desc = could not find container \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": container with ID starting with ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.304544 4948 scope.go:117] "RemoveContainer" containerID="7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.304930 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331"} err="failed to get container status \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": rpc error: code = NotFound desc = could not find container \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": container with ID starting with 7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.304965 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.305205 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4"} err="failed to get container status 
\"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": rpc error: code = NotFound desc = could not find container \"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": container with ID starting with a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.305260 4948 scope.go:117] "RemoveContainer" containerID="d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.305578 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737"} err="failed to get container status \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": rpc error: code = NotFound desc = could not find container \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": container with ID starting with d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.305617 4948 scope.go:117] "RemoveContainer" containerID="2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.306080 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82"} err="failed to get container status \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": rpc error: code = NotFound desc = could not find container \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": container with ID starting with 2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.306098 4948 scope.go:117] "RemoveContainer" containerID="93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.306451 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d"} err="failed to get container status \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": rpc error: code = NotFound desc = could not find container \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": container with ID starting with 93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.306469 4948 scope.go:117] "RemoveContainer" containerID="11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.306811 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a"} err="failed to get container status \"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": rpc error: code = NotFound desc = could not find container \"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": container with ID starting with 11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.306828 4948 scope.go:117] "RemoveContainer" 
containerID="9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.308903 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7"} err="failed to get container status \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": rpc error: code = NotFound desc = could not find container \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": container with ID starting with 9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.308945 4948 scope.go:117] "RemoveContainer" containerID="67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.309316 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f"} err="failed to get container status \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": rpc error: code = NotFound desc = could not find container \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": container with ID starting with 67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.309333 4948 scope.go:117] "RemoveContainer" containerID="74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.309574 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0"} err="failed to get container status \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": rpc error: code = NotFound desc = could not find container \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": container with ID starting with 74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.309589 4948 scope.go:117] "RemoveContainer" containerID="ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.309886 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b"} err="failed to get container status \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": rpc error: code = NotFound desc = could not find container \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": container with ID starting with ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.309932 4948 scope.go:117] "RemoveContainer" containerID="7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.310242 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331"} err="failed to get container status \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": rpc error: code = NotFound desc = could not find 
container \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": container with ID starting with 7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.310313 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.310596 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4"} err="failed to get container status \"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": rpc error: code = NotFound desc = could not find container \"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": container with ID starting with a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.310638 4948 scope.go:117] "RemoveContainer" containerID="d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.311018 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737"} err="failed to get container status \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": rpc error: code = NotFound desc = could not find container \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": container with ID starting with d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.311064 4948 scope.go:117] "RemoveContainer" containerID="2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.311337 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82"} err="failed to get container status \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": rpc error: code = NotFound desc = could not find container \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": container with ID starting with 2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.311354 4948 scope.go:117] "RemoveContainer" containerID="93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.311591 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d"} err="failed to get container status \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": rpc error: code = NotFound desc = could not find container \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": container with ID starting with 93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.311675 4948 scope.go:117] "RemoveContainer" containerID="11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.311965 4948 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a"} err="failed to get container status \"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": rpc error: code = NotFound desc = could not find container \"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": container with ID starting with 11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.311980 4948 scope.go:117] "RemoveContainer" containerID="9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312163 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7"} err="failed to get container status \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": rpc error: code = NotFound desc = could not find container \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": container with ID starting with 9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312175 4948 scope.go:117] "RemoveContainer" containerID="67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312332 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f"} err="failed to get container status \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": rpc error: code = NotFound desc = could not find container \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": container with ID starting with 67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312344 4948 scope.go:117] "RemoveContainer" containerID="74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312498 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0"} err="failed to get container status \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": rpc error: code = NotFound desc = could not find container \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": container with ID starting with 74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312511 4948 scope.go:117] "RemoveContainer" containerID="ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312715 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b"} err="failed to get container status \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": rpc error: code = NotFound desc = could not find container \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": container with ID starting with 
ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312727 4948 scope.go:117] "RemoveContainer" containerID="7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312946 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331"} err="failed to get container status \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": rpc error: code = NotFound desc = could not find container \"7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331\": container with ID starting with 7e44e03f47568e3c642c797257ba968c3edd7ff493ccc9aebfa0c6b428e82331 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.312960 4948 scope.go:117] "RemoveContainer" containerID="a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.313288 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4"} err="failed to get container status \"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": rpc error: code = NotFound desc = could not find container \"a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4\": container with ID starting with a6f023d000e6129f2a1d638337a416ecabe2f8d4154ba376c9bae2210977a8f4 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.313334 4948 scope.go:117] "RemoveContainer" containerID="d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.313957 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737"} err="failed to get container status \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": rpc error: code = NotFound desc = could not find container \"d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737\": container with ID starting with d9beff4acda59bc7aa472907931b4e0e0388d2dd6123561c7445398e44a1e737 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.314010 4948 scope.go:117] "RemoveContainer" containerID="2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.314394 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82"} err="failed to get container status \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": rpc error: code = NotFound desc = could not find container \"2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82\": container with ID starting with 2d0a6e5de3223cecb5fb88b3f169b1ce19c0256f7398097ebeb44c0b6abc6a82 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.314418 4948 scope.go:117] "RemoveContainer" containerID="93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.314596 4948 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d"} err="failed to get container status \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": rpc error: code = NotFound desc = could not find container \"93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d\": container with ID starting with 93a49b6d55567001ef3e2cb54d7c066247fe0bb72f76bdfcef2b1555c52d1b9d not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.314616 4948 scope.go:117] "RemoveContainer" containerID="11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.314981 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a"} err="failed to get container status \"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": rpc error: code = NotFound desc = could not find container \"11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a\": container with ID starting with 11bce2e06041361befa65b495d312d597e8303e9236cbee6d978ce9a64330c8a not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.315001 4948 scope.go:117] "RemoveContainer" containerID="9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.315198 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7"} err="failed to get container status \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": rpc error: code = NotFound desc = could not find container \"9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7\": container with ID starting with 9380365ca6670adb3a02a9482e4a2dc2d07ec502dea8bd563a47597c5c61e7e7 not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.315222 4948 scope.go:117] "RemoveContainer" containerID="67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.315398 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f"} err="failed to get container status \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": rpc error: code = NotFound desc = could not find container \"67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f\": container with ID starting with 67fe04a3ac46c665bd6fd824ab62147a6461a96dbd6c7f75bfab4188b402d75f not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.315417 4948 scope.go:117] "RemoveContainer" containerID="74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.315895 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0"} err="failed to get container status \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": rpc error: code = NotFound desc = could not find container \"74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0\": container with ID starting with 74c3df41c08c3ac8d8eac2dff03a61487af57f45cdee9b3bf0944367ff240af0 not found: ID does not exist" Jan 
20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.315912 4948 scope.go:117] "RemoveContainer" containerID="ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.316086 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b"} err="failed to get container status \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": rpc error: code = NotFound desc = could not find container \"ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b\": container with ID starting with ff92396ebff8d989a213ef09699cc5f186b020782220281287b94317fc67e97b not found: ID does not exist" Jan 20 19:58:24 crc kubenswrapper[4948]: I0120 19:58:24.576895 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b00db8b2-f5fb-476f-bfc1-95c125fdaaac" path="/var/lib/kubelet/pods/b00db8b2-f5fb-476f-bfc1-95c125fdaaac/volumes" Jan 20 19:58:25 crc kubenswrapper[4948]: I0120 19:58:25.009881 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"27e0798ffc46d3048e333aa1957a2a0c4588c0273e2f2c150f42016ddad027e0"} Jan 20 19:58:25 crc kubenswrapper[4948]: I0120 19:58:25.009929 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"2d3f5729bb97b44d50ed5fd34b61cccecf51d98d33f67cff15e706df56e4585f"} Jan 20 19:58:25 crc kubenswrapper[4948]: I0120 19:58:25.009943 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"452d532ff5d356e95cda8a91367895c03463c6bb8ce5b8e314798f696259cbe9"} Jan 20 19:58:25 crc kubenswrapper[4948]: I0120 19:58:25.009953 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"4d7ad44b7896af2a87afc6f4062e27b8a3590858ba5ad30a87c6348ece2d82fe"} Jan 20 19:58:25 crc kubenswrapper[4948]: I0120 19:58:25.009963 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"7c045f9a959f74f9cd65eda36d90efbd5d38279d5d04e25e2f5a981a4e34333c"} Jan 20 19:58:25 crc kubenswrapper[4948]: I0120 19:58:25.009973 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"a20dcddd13c875c46227c4681846426b832b6669ea136e4f4e218a613b7aedec"} Jan 20 19:58:27 crc kubenswrapper[4948]: I0120 19:58:27.027029 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"77f21e1b7290d385bd1696ad5b3d9b8f87377d4cce2b97616696ce3f11e7284d"} Jan 20 19:58:30 crc kubenswrapper[4948]: I0120 19:58:30.047946 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" 
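event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"fbd0143447b20cf50e6e2dd841ac7e483beb7a0e00066b131d3539cf5a7296f9"}

The burst above is the kubelet tearing down the deleted ovnkube-node-rtkhq pod: scope.go logs "RemoveContainer" for each of the pod's ten container IDs, the ContainerStatus RPC to CRI-O answers NotFound for every one, and pod_container_deletor records the error and moves on, cycling through the same ten IDs several times. A NotFound from the runtime is effectively a success for deletion, and the usual gRPC client-side convention makes that explicit. A minimal sketch of that convention, assuming the standard google.golang.org/grpc status and codes packages; removeFn is a hypothetical stand-in for the CRI call, not the kubelet's actual code:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIfPresent treats a NotFound error from the runtime as "already
// removed", which is what the retries above keep rediscovering.
func removeIfPresent(id string, removeFn func(id string) error) error {
	err := removeFn(id)
	if err == nil {
		return nil
	}
	if status.Code(err) == codes.NotFound {
		fmt.Printf("container %s already removed\n", id)
		return nil // deletion is idempotent
	}
	return fmt.Errorf("removing container %s: %w", id, err)
}

func main() {
	notFound := status.Error(codes.NotFound, "could not find container")
	_ = removeIfPresent("7e44e03f4756...", func(string) error { return notFound })
	_ = removeIfPresent("7e44e03f4756...", func(string) error { return errors.New("runtime unavailable") })
}
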
event={"ID":"4ed29cf1-d076-41a3-8ad1-438db91ad979","Type":"ContainerStarted","Data":"fbd0143447b20cf50e6e2dd841ac7e483beb7a0e00066b131d3539cf5a7296f9"} Jan 20 19:58:30 crc kubenswrapper[4948]: I0120 19:58:30.048499 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:30 crc kubenswrapper[4948]: I0120 19:58:30.048519 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:30 crc kubenswrapper[4948]: I0120 19:58:30.078391 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:30 crc kubenswrapper[4948]: I0120 19:58:30.086091 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" podStartSLOduration=7.086074871 podStartE2EDuration="7.086074871s" podCreationTimestamp="2026-01-20 19:58:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:58:30.08249004 +0000 UTC m=+538.033215019" watchObservedRunningTime="2026-01-20 19:58:30.086074871 +0000 UTC m=+538.036799840" Jan 20 19:58:31 crc kubenswrapper[4948]: I0120 19:58:31.055490 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:31 crc kubenswrapper[4948]: I0120 19:58:31.089513 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:58:38 crc kubenswrapper[4948]: I0120 19:58:38.570393 4948 scope.go:117] "RemoveContainer" containerID="b41d2a53810cfb4c072af0d88429759b11509193add1fb0f10d77de4d747b8b4" Jan 20 19:58:39 crc kubenswrapper[4948]: I0120 19:58:39.098345 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/1.log" Jan 20 19:58:39 crc kubenswrapper[4948]: I0120 19:58:39.098646 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-qttfm" event={"ID":"e21ac8a2-1e79-4191-b809-75085d432b31","Type":"ContainerStarted","Data":"665b3d3723095d108327e6d13280da28f760ec1eb5b3ae97d4a86bc1c08c1001"} Jan 20 19:58:50 crc kubenswrapper[4948]: I0120 19:58:50.250187 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:58:50 crc kubenswrapper[4948]: I0120 19:58:50.250936 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:58:53 crc kubenswrapper[4948]: I0120 19:58:53.579174 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-5f676" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.816778 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7"] Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.818326 4948 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.821057 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.835684 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7"] Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.848734 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.848792 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.848847 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8lch\" (UniqueName: \"kubernetes.io/projected/d0fed87f-472d-480c-8006-2c2dc60df61e-kube-api-access-h8lch\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.950324 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.950388 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.950449 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8lch\" (UniqueName: \"kubernetes.io/projected/d0fed87f-472d-480c-8006-2c2dc60df61e-kube-api-access-h8lch\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.950830 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.951278 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:03 crc kubenswrapper[4948]: I0120 19:59:03.971056 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8lch\" (UniqueName: \"kubernetes.io/projected/d0fed87f-472d-480c-8006-2c2dc60df61e-kube-api-access-h8lch\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:04 crc kubenswrapper[4948]: I0120 19:59:04.134352 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:04 crc kubenswrapper[4948]: I0120 19:59:04.340259 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7"] Jan 20 19:59:04 crc kubenswrapper[4948]: W0120 19:59:04.348775 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0fed87f_472d_480c_8006_2c2dc60df61e.slice/crio-6219b365e921d5d139d9e4b4a7f50e70744e427fba94ef4479d01591eddfcc78 WatchSource:0}: Error finding container 6219b365e921d5d139d9e4b4a7f50e70744e427fba94ef4479d01591eddfcc78: Status 404 returned error can't find the container with id 6219b365e921d5d139d9e4b4a7f50e70744e427fba94ef4479d01591eddfcc78 Jan 20 19:59:05 crc kubenswrapper[4948]: I0120 19:59:05.278444 4948 generic.go:334] "Generic (PLEG): container finished" podID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerID="e166e06f2e649cf247e76487d448ff561ce0f403af994a0622730fa164a3cacb" exitCode=0 Jan 20 19:59:05 crc kubenswrapper[4948]: I0120 19:59:05.278550 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" event={"ID":"d0fed87f-472d-480c-8006-2c2dc60df61e","Type":"ContainerDied","Data":"e166e06f2e649cf247e76487d448ff561ce0f403af994a0622730fa164a3cacb"} Jan 20 19:59:05 crc kubenswrapper[4948]: I0120 19:59:05.278842 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" event={"ID":"d0fed87f-472d-480c-8006-2c2dc60df61e","Type":"ContainerStarted","Data":"6219b365e921d5d139d9e4b4a7f50e70744e427fba94ef4479d01591eddfcc78"} Jan 20 19:59:14 crc kubenswrapper[4948]: I0120 19:59:14.329140 4948 generic.go:334] "Generic (PLEG): container finished" podID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerID="4afbe6816412c21f8c7661a50f223c0fe45073d8110feac470041f7d1c80bd7f" exitCode=0 Jan 20 19:59:14 crc kubenswrapper[4948]: I0120 19:59:14.329255 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" event={"ID":"d0fed87f-472d-480c-8006-2c2dc60df61e","Type":"ContainerDied","Data":"4afbe6816412c21f8c7661a50f223c0fe45073d8110feac470041f7d1c80bd7f"} Jan 20 19:59:15 crc kubenswrapper[4948]: I0120 19:59:15.341032 4948 generic.go:334] "Generic (PLEG): container finished" podID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerID="f1db03038fb49d90874b456398848121f75c5ab4717de1820d995376b0200883" exitCode=0 Jan 20 19:59:15 crc kubenswrapper[4948]: I0120 19:59:15.341094 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" event={"ID":"d0fed87f-472d-480c-8006-2c2dc60df61e","Type":"ContainerDied","Data":"f1db03038fb49d90874b456398848121f75c5ab4717de1820d995376b0200883"} Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.616622 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.723548 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-bundle\") pod \"d0fed87f-472d-480c-8006-2c2dc60df61e\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.723659 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-util\") pod \"d0fed87f-472d-480c-8006-2c2dc60df61e\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.723690 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8lch\" (UniqueName: \"kubernetes.io/projected/d0fed87f-472d-480c-8006-2c2dc60df61e-kube-api-access-h8lch\") pod \"d0fed87f-472d-480c-8006-2c2dc60df61e\" (UID: \"d0fed87f-472d-480c-8006-2c2dc60df61e\") " Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.724849 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-bundle" (OuterVolumeSpecName: "bundle") pod "d0fed87f-472d-480c-8006-2c2dc60df61e" (UID: "d0fed87f-472d-480c-8006-2c2dc60df61e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.730933 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0fed87f-472d-480c-8006-2c2dc60df61e-kube-api-access-h8lch" (OuterVolumeSpecName: "kube-api-access-h8lch") pod "d0fed87f-472d-480c-8006-2c2dc60df61e" (UID: "d0fed87f-472d-480c-8006-2c2dc60df61e"). InnerVolumeSpecName "kube-api-access-h8lch". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.735650 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-util" (OuterVolumeSpecName: "util") pod "d0fed87f-472d-480c-8006-2c2dc60df61e" (UID: "d0fed87f-472d-480c-8006-2c2dc60df61e"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.825398 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-util\") on node \"crc\" DevicePath \"\"" Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.825450 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8lch\" (UniqueName: \"kubernetes.io/projected/d0fed87f-472d-480c-8006-2c2dc60df61e-kube-api-access-h8lch\") on node \"crc\" DevicePath \"\"" Jan 20 19:59:16 crc kubenswrapper[4948]: I0120 19:59:16.825465 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d0fed87f-472d-480c-8006-2c2dc60df61e-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 19:59:17 crc kubenswrapper[4948]: I0120 19:59:17.355589 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" event={"ID":"d0fed87f-472d-480c-8006-2c2dc60df61e","Type":"ContainerDied","Data":"6219b365e921d5d139d9e4b4a7f50e70744e427fba94ef4479d01591eddfcc78"} Jan 20 19:59:17 crc kubenswrapper[4948]: I0120 19:59:17.355634 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6219b365e921d5d139d9e4b4a7f50e70744e427fba94ef4479d01591eddfcc78" Jan 20 19:59:17 crc kubenswrapper[4948]: I0120 19:59:17.355733 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.249827 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.250894 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.251011 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.251688 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e049e149f0a0dc1b1b363bfb2d9bdbd795da8ca2d31406285050192b1751620d"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.251846 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://e049e149f0a0dc1b1b363bfb2d9bdbd795da8ca2d31406285050192b1751620d" gracePeriod=600 Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.490780 4948 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-nmstate/nmstate-operator-646758c888-9ldq2"] Jan 20 19:59:20 crc kubenswrapper[4948]: E0120 19:59:20.492210 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerName="extract" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.492227 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerName="extract" Jan 20 19:59:20 crc kubenswrapper[4948]: E0120 19:59:20.492246 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerName="util" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.492253 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerName="util" Jan 20 19:59:20 crc kubenswrapper[4948]: E0120 19:59:20.492266 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerName="pull" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.492272 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerName="pull" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.495195 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0fed87f-472d-480c-8006-2c2dc60df61e" containerName="extract" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.496647 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-9ldq2" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.508724 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.508908 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.521761 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-nkjzh" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.529501 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-9ldq2"] Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.674512 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r858n\" (UniqueName: \"kubernetes.io/projected/d72955e0-ce7e-4d8f-be8a-b22eee46ec69-kube-api-access-r858n\") pod \"nmstate-operator-646758c888-9ldq2\" (UID: \"d72955e0-ce7e-4d8f-be8a-b22eee46ec69\") " pod="openshift-nmstate/nmstate-operator-646758c888-9ldq2" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.775910 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r858n\" (UniqueName: \"kubernetes.io/projected/d72955e0-ce7e-4d8f-be8a-b22eee46ec69-kube-api-access-r858n\") pod \"nmstate-operator-646758c888-9ldq2\" (UID: \"d72955e0-ce7e-4d8f-be8a-b22eee46ec69\") " pod="openshift-nmstate/nmstate-operator-646758c888-9ldq2" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.795027 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r858n\" (UniqueName: \"kubernetes.io/projected/d72955e0-ce7e-4d8f-be8a-b22eee46ec69-kube-api-access-r858n\") pod \"nmstate-operator-646758c888-9ldq2\" (UID: \"d72955e0-ce7e-4d8f-be8a-b22eee46ec69\") " 
pod="openshift-nmstate/nmstate-operator-646758c888-9ldq2" Jan 20 19:59:20 crc kubenswrapper[4948]: I0120 19:59:20.853159 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-9ldq2" Jan 20 19:59:21 crc kubenswrapper[4948]: I0120 19:59:21.337822 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-9ldq2"] Jan 20 19:59:21 crc kubenswrapper[4948]: I0120 19:59:21.381056 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-9ldq2" event={"ID":"d72955e0-ce7e-4d8f-be8a-b22eee46ec69","Type":"ContainerStarted","Data":"73e9b2eb74f1781a65beb49a2467ccce3c8694b7df4f71a05aa6b0d1cae8d521"} Jan 20 19:59:21 crc kubenswrapper[4948]: I0120 19:59:21.383416 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="e049e149f0a0dc1b1b363bfb2d9bdbd795da8ca2d31406285050192b1751620d" exitCode=0 Jan 20 19:59:21 crc kubenswrapper[4948]: I0120 19:59:21.383461 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"e049e149f0a0dc1b1b363bfb2d9bdbd795da8ca2d31406285050192b1751620d"} Jan 20 19:59:21 crc kubenswrapper[4948]: I0120 19:59:21.383487 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"d62e03ef00dbbeb77df97565ffab795a12284dfbc62cb77594b2a0a88f280a6c"} Jan 20 19:59:21 crc kubenswrapper[4948]: I0120 19:59:21.383503 4948 scope.go:117] "RemoveContainer" containerID="615f93555b1b0a9ccd007e1b86dbe692ba729e13c19eaa173e866087cfea406b" Jan 20 19:59:24 crc kubenswrapper[4948]: I0120 19:59:24.405610 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-9ldq2" event={"ID":"d72955e0-ce7e-4d8f-be8a-b22eee46ec69","Type":"ContainerStarted","Data":"80cf9907ba7c362f5e1a7b982ba168f858508b7c320da6dd641c3da723695af0"} Jan 20 19:59:24 crc kubenswrapper[4948]: I0120 19:59:24.437595 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-9ldq2" podStartSLOduration=2.308155526 podStartE2EDuration="4.437563552s" podCreationTimestamp="2026-01-20 19:59:20 +0000 UTC" firstStartedPulling="2026-01-20 19:59:21.359370202 +0000 UTC m=+589.310095171" lastFinishedPulling="2026-01-20 19:59:23.488778228 +0000 UTC m=+591.439503197" observedRunningTime="2026-01-20 19:59:24.431860851 +0000 UTC m=+592.382585820" watchObservedRunningTime="2026-01-20 19:59:24.437563552 +0000 UTC m=+592.388288531" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.532613 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-jq57s"] Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.535210 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.537268 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-bbmmt" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.560837 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c"] Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.561956 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.564408 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.566634 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-jq57s"] Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.607574 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-nqpgc"] Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.608266 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.614664 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c"] Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.641988 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b4431242-1662-43bd-bbfc-192d87f5393b-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-6lt8c\" (UID: \"b4431242-1662-43bd-bbfc-192d87f5393b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.642087 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knrvg\" (UniqueName: \"kubernetes.io/projected/34b9a637-f29d-49ad-961c-d923e71907e1-kube-api-access-knrvg\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.642117 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-ovs-socket\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.642140 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-dbus-socket\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.642175 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7gpv\" (UniqueName: \"kubernetes.io/projected/d7a43a4d-6505-4105-bfb8-c1239d0436e8-kube-api-access-v7gpv\") pod \"nmstate-metrics-54757c584b-jq57s\" (UID: \"d7a43a4d-6505-4105-bfb8-c1239d0436e8\") " 
pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.642257 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-nmstate-lock\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.642321 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sppbk\" (UniqueName: \"kubernetes.io/projected/b4431242-1662-43bd-bbfc-192d87f5393b-kube-api-access-sppbk\") pod \"nmstate-webhook-8474b5b9d8-6lt8c\" (UID: \"b4431242-1662-43bd-bbfc-192d87f5393b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.743843 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-nmstate-lock\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.743964 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-nmstate-lock\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.744094 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sppbk\" (UniqueName: \"kubernetes.io/projected/b4431242-1662-43bd-bbfc-192d87f5393b-kube-api-access-sppbk\") pod \"nmstate-webhook-8474b5b9d8-6lt8c\" (UID: \"b4431242-1662-43bd-bbfc-192d87f5393b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.744327 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b4431242-1662-43bd-bbfc-192d87f5393b-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-6lt8c\" (UID: \"b4431242-1662-43bd-bbfc-192d87f5393b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.744419 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knrvg\" (UniqueName: \"kubernetes.io/projected/34b9a637-f29d-49ad-961c-d923e71907e1-kube-api-access-knrvg\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.744528 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-ovs-socket\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.744628 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-dbus-socket\") pod \"nmstate-handler-nqpgc\" (UID: 
\"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.745192 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7gpv\" (UniqueName: \"kubernetes.io/projected/d7a43a4d-6505-4105-bfb8-c1239d0436e8-kube-api-access-v7gpv\") pod \"nmstate-metrics-54757c584b-jq57s\" (UID: \"d7a43a4d-6505-4105-bfb8-c1239d0436e8\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" Jan 20 19:59:25 crc kubenswrapper[4948]: E0120 19:59:25.744451 4948 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 20 19:59:25 crc kubenswrapper[4948]: E0120 19:59:25.745656 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b4431242-1662-43bd-bbfc-192d87f5393b-tls-key-pair podName:b4431242-1662-43bd-bbfc-192d87f5393b nodeName:}" failed. No retries permitted until 2026-01-20 19:59:26.245639132 +0000 UTC m=+594.196364101 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/b4431242-1662-43bd-bbfc-192d87f5393b-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-6lt8c" (UID: "b4431242-1662-43bd-bbfc-192d87f5393b") : secret "openshift-nmstate-webhook" not found Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.744576 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-ovs-socket\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.745150 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/34b9a637-f29d-49ad-961c-d923e71907e1-dbus-socket\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.779333 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7gpv\" (UniqueName: \"kubernetes.io/projected/d7a43a4d-6505-4105-bfb8-c1239d0436e8-kube-api-access-v7gpv\") pod \"nmstate-metrics-54757c584b-jq57s\" (UID: \"d7a43a4d-6505-4105-bfb8-c1239d0436e8\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.780236 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9"] Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.781007 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.789893 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sppbk\" (UniqueName: \"kubernetes.io/projected/b4431242-1662-43bd-bbfc-192d87f5393b-kube-api-access-sppbk\") pod \"nmstate-webhook-8474b5b9d8-6lt8c\" (UID: \"b4431242-1662-43bd-bbfc-192d87f5393b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.800489 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.800515 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.800593 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-4hmr4" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.832196 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9"] Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.847649 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km2g5\" (UniqueName: \"kubernetes.io/projected/a0bd44ac-39a0-4aed-8a23-d12330d46924-kube-api-access-km2g5\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.848001 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0bd44ac-39a0-4aed-8a23-d12330d46924-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.848170 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/a0bd44ac-39a0-4aed-8a23-d12330d46924-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.854916 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knrvg\" (UniqueName: \"kubernetes.io/projected/34b9a637-f29d-49ad-961c-d923e71907e1-kube-api-access-knrvg\") pod \"nmstate-handler-nqpgc\" (UID: \"34b9a637-f29d-49ad-961c-d923e71907e1\") " pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:25 crc kubenswrapper[4948]: I0120 19:59:25.855211 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:25.996043 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-568fd6f89f-fcgm2"] Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:25.996813 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.014310 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km2g5\" (UniqueName: \"kubernetes.io/projected/a0bd44ac-39a0-4aed-8a23-d12330d46924-kube-api-access-km2g5\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.014351 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0bd44ac-39a0-4aed-8a23-d12330d46924-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.014393 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/a0bd44ac-39a0-4aed-8a23-d12330d46924-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.015188 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/a0bd44ac-39a0-4aed-8a23-d12330d46924-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:26 crc kubenswrapper[4948]: E0120 19:59:26.016332 4948 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 20 19:59:26 crc kubenswrapper[4948]: E0120 19:59:26.016379 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a0bd44ac-39a0-4aed-8a23-d12330d46924-plugin-serving-cert podName:a0bd44ac-39a0-4aed-8a23-d12330d46924 nodeName:}" failed. No retries permitted until 2026-01-20 19:59:26.516363001 +0000 UTC m=+594.467087970 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/a0bd44ac-39a0-4aed-8a23-d12330d46924-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-czsd9" (UID: "a0bd44ac-39a0-4aed-8a23-d12330d46924") : secret "plugin-serving-cert" not found Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.019399 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:26 crc kubenswrapper[4948]: W0120 19:59:26.052893 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34b9a637_f29d_49ad_961c_d923e71907e1.slice/crio-b2b92a6acd0ef64a95a84e4104ef48da22dd91bf837a76b235b61881fb9f7fbf WatchSource:0}: Error finding container b2b92a6acd0ef64a95a84e4104ef48da22dd91bf837a76b235b61881fb9f7fbf: Status 404 returned error can't find the container with id b2b92a6acd0ef64a95a84e4104ef48da22dd91bf837a76b235b61881fb9f7fbf Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.063551 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km2g5\" (UniqueName: \"kubernetes.io/projected/a0bd44ac-39a0-4aed-8a23-d12330d46924-kube-api-access-km2g5\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.089805 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-568fd6f89f-fcgm2"] Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.116292 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/18743f08-4689-428c-a15e-8fad44cc8d48-console-oauth-config\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.116330 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-console-config\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.116349 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/18743f08-4689-428c-a15e-8fad44cc8d48-console-serving-cert\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.116367 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-service-ca\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.116392 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-trusted-ca-bundle\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.116444 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-oauth-serving-cert\") pod 
\"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.116462 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw9hg\" (UniqueName: \"kubernetes.io/projected/18743f08-4689-428c-a15e-8fad44cc8d48-kube-api-access-xw9hg\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.217647 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-trusted-ca-bundle\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.218067 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-oauth-serving-cert\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.218096 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw9hg\" (UniqueName: \"kubernetes.io/projected/18743f08-4689-428c-a15e-8fad44cc8d48-kube-api-access-xw9hg\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.218134 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/18743f08-4689-428c-a15e-8fad44cc8d48-console-oauth-config\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.218161 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-console-config\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.218180 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/18743f08-4689-428c-a15e-8fad44cc8d48-console-serving-cert\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.218210 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-service-ca\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.219145 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-console-config\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.219277 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-oauth-serving-cert\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.219470 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-trusted-ca-bundle\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.219792 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/18743f08-4689-428c-a15e-8fad44cc8d48-service-ca\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.225388 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/18743f08-4689-428c-a15e-8fad44cc8d48-console-oauth-config\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.225554 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/18743f08-4689-428c-a15e-8fad44cc8d48-console-serving-cert\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.242554 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw9hg\" (UniqueName: \"kubernetes.io/projected/18743f08-4689-428c-a15e-8fad44cc8d48-kube-api-access-xw9hg\") pod \"console-568fd6f89f-fcgm2\" (UID: \"18743f08-4689-428c-a15e-8fad44cc8d48\") " pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.321057 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b4431242-1662-43bd-bbfc-192d87f5393b-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-6lt8c\" (UID: \"b4431242-1662-43bd-bbfc-192d87f5393b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.321920 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.326741 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b4431242-1662-43bd-bbfc-192d87f5393b-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-6lt8c\" (UID: \"b4431242-1662-43bd-bbfc-192d87f5393b\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.421009 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-nqpgc" event={"ID":"34b9a637-f29d-49ad-961c-d923e71907e1","Type":"ContainerStarted","Data":"b2b92a6acd0ef64a95a84e4104ef48da22dd91bf837a76b235b61881fb9f7fbf"} Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.477111 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.523885 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0bd44ac-39a0-4aed-8a23-d12330d46924-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.529797 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/a0bd44ac-39a0-4aed-8a23-d12330d46924-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-czsd9\" (UID: \"a0bd44ac-39a0-4aed-8a23-d12330d46924\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.774907 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" Jan 20 19:59:26 crc kubenswrapper[4948]: I0120 19:59:26.884336 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-jq57s"] Jan 20 19:59:26 crc kubenswrapper[4948]: W0120 19:59:26.893172 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7a43a4d_6505_4105_bfb8_c1239d0436e8.slice/crio-a31ac3dccc2237333f55138ba4b3510a126fe190734ed0d809ae1f02f381c9cb WatchSource:0}: Error finding container a31ac3dccc2237333f55138ba4b3510a126fe190734ed0d809ae1f02f381c9cb: Status 404 returned error can't find the container with id a31ac3dccc2237333f55138ba4b3510a126fe190734ed0d809ae1f02f381c9cb Jan 20 19:59:27 crc kubenswrapper[4948]: W0120 19:59:27.079920 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0bd44ac_39a0_4aed_8a23_d12330d46924.slice/crio-284ec8de95be5f97f03ccfc99e295a5ecb2d4406c7180498d072b59862b3ccf1 WatchSource:0}: Error finding container 284ec8de95be5f97f03ccfc99e295a5ecb2d4406c7180498d072b59862b3ccf1: Status 404 returned error can't find the container with id 284ec8de95be5f97f03ccfc99e295a5ecb2d4406c7180498d072b59862b3ccf1 Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.080720 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9"] Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.198096 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c"] Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.242399 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-568fd6f89f-fcgm2"] Jan 20 19:59:27 crc kubenswrapper[4948]: W0120 19:59:27.246183 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18743f08_4689_428c_a15e_8fad44cc8d48.slice/crio-76a5e96dc99678750b1fbda1ea5ae3110c9f19597038036c94c9c12baabdce31 WatchSource:0}: Error finding container 76a5e96dc99678750b1fbda1ea5ae3110c9f19597038036c94c9c12baabdce31: Status 404 returned error can't find the container with id 76a5e96dc99678750b1fbda1ea5ae3110c9f19597038036c94c9c12baabdce31 Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.427087 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-568fd6f89f-fcgm2" event={"ID":"18743f08-4689-428c-a15e-8fad44cc8d48","Type":"ContainerStarted","Data":"0e2e7273853a05b07723c9a38c515e4430185b987445c190a558a9f910cbe803"} Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.427338 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-568fd6f89f-fcgm2" event={"ID":"18743f08-4689-428c-a15e-8fad44cc8d48","Type":"ContainerStarted","Data":"76a5e96dc99678750b1fbda1ea5ae3110c9f19597038036c94c9c12baabdce31"} Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.429321 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" event={"ID":"a0bd44ac-39a0-4aed-8a23-d12330d46924","Type":"ContainerStarted","Data":"284ec8de95be5f97f03ccfc99e295a5ecb2d4406c7180498d072b59862b3ccf1"} Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.430070 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" event={"ID":"d7a43a4d-6505-4105-bfb8-c1239d0436e8","Type":"ContainerStarted","Data":"a31ac3dccc2237333f55138ba4b3510a126fe190734ed0d809ae1f02f381c9cb"} Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.434227 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" event={"ID":"b4431242-1662-43bd-bbfc-192d87f5393b","Type":"ContainerStarted","Data":"8b427d1b29be86cdd90c57572a00d8eeb254120911bc9690e0a0689fee969d21"} Jan 20 19:59:27 crc kubenswrapper[4948]: I0120 19:59:27.447750 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-568fd6f89f-fcgm2" podStartSLOduration=2.44769837 podStartE2EDuration="2.44769837s" podCreationTimestamp="2026-01-20 19:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 19:59:27.445303553 +0000 UTC m=+595.396028522" watchObservedRunningTime="2026-01-20 19:59:27.44769837 +0000 UTC m=+595.398423339" Jan 20 19:59:29 crc kubenswrapper[4948]: I0120 19:59:29.458033 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" event={"ID":"d7a43a4d-6505-4105-bfb8-c1239d0436e8","Type":"ContainerStarted","Data":"db4aa9c4ae0deb9d1be78445c89a5819fbfbc1d9c848d33edcfdb8b4c2344b61"} Jan 20 19:59:30 crc kubenswrapper[4948]: I0120 19:59:30.476811 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-nqpgc" event={"ID":"34b9a637-f29d-49ad-961c-d923e71907e1","Type":"ContainerStarted","Data":"6b338d591e5fd001d4a29c713dbf02010ab36b62fa5e32452f9c8e69401c5f79"} Jan 20 19:59:30 crc kubenswrapper[4948]: I0120 19:59:30.477191 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:30 crc kubenswrapper[4948]: I0120 19:59:30.479694 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" event={"ID":"b4431242-1662-43bd-bbfc-192d87f5393b","Type":"ContainerStarted","Data":"37d38e566cdd5a1928e8a383b4fc4dd4b16188a90cfc4e476443ed6b03093b34"} Jan 20 19:59:30 crc kubenswrapper[4948]: I0120 19:59:30.479893 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 19:59:30 crc kubenswrapper[4948]: I0120 19:59:30.504137 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-nqpgc" podStartSLOduration=2.377679964 podStartE2EDuration="5.504120604s" podCreationTimestamp="2026-01-20 19:59:25 +0000 UTC" firstStartedPulling="2026-01-20 19:59:26.054928129 +0000 UTC m=+594.005653098" lastFinishedPulling="2026-01-20 19:59:29.181368689 +0000 UTC m=+597.132093738" observedRunningTime="2026-01-20 19:59:30.494851973 +0000 UTC m=+598.445576942" watchObservedRunningTime="2026-01-20 19:59:30.504120604 +0000 UTC m=+598.454845573" Jan 20 19:59:30 crc kubenswrapper[4948]: I0120 19:59:30.517762 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" podStartSLOduration=3.54911945 podStartE2EDuration="5.517743699s" podCreationTimestamp="2026-01-20 19:59:25 +0000 UTC" firstStartedPulling="2026-01-20 19:59:27.220739096 +0000 UTC m=+595.171464065" lastFinishedPulling="2026-01-20 19:59:29.189363345 +0000 UTC m=+597.140088314" 
observedRunningTime="2026-01-20 19:59:30.517352068 +0000 UTC m=+598.468077047" watchObservedRunningTime="2026-01-20 19:59:30.517743699 +0000 UTC m=+598.468468688" Jan 20 19:59:31 crc kubenswrapper[4948]: I0120 19:59:31.485933 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" event={"ID":"a0bd44ac-39a0-4aed-8a23-d12330d46924","Type":"ContainerStarted","Data":"797611bbae248ead79c466dc3e92a7426ef39c3bf19d01282ce946f6bac3914d"} Jan 20 19:59:31 crc kubenswrapper[4948]: I0120 19:59:31.511011 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-czsd9" podStartSLOduration=3.046657762 podStartE2EDuration="6.510982626s" podCreationTimestamp="2026-01-20 19:59:25 +0000 UTC" firstStartedPulling="2026-01-20 19:59:27.081678302 +0000 UTC m=+595.032403271" lastFinishedPulling="2026-01-20 19:59:30.546003166 +0000 UTC m=+598.496728135" observedRunningTime="2026-01-20 19:59:31.507517738 +0000 UTC m=+599.458242707" watchObservedRunningTime="2026-01-20 19:59:31.510982626 +0000 UTC m=+599.461707605" Jan 20 19:59:32 crc kubenswrapper[4948]: I0120 19:59:32.493933 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" event={"ID":"d7a43a4d-6505-4105-bfb8-c1239d0436e8","Type":"ContainerStarted","Data":"136037306b05d23f8775c8b474b4d3ecaf9fe930ef8a9f7a6e4a80b0f2ada236"} Jan 20 19:59:32 crc kubenswrapper[4948]: I0120 19:59:32.519529 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-jq57s" podStartSLOduration=2.161487684 podStartE2EDuration="7.519476263s" podCreationTimestamp="2026-01-20 19:59:25 +0000 UTC" firstStartedPulling="2026-01-20 19:59:26.89628003 +0000 UTC m=+594.847004999" lastFinishedPulling="2026-01-20 19:59:32.254268609 +0000 UTC m=+600.204993578" observedRunningTime="2026-01-20 19:59:32.514895504 +0000 UTC m=+600.465620503" watchObservedRunningTime="2026-01-20 19:59:32.519476263 +0000 UTC m=+600.470201282" Jan 20 19:59:36 crc kubenswrapper[4948]: I0120 19:59:36.042491 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-nqpgc" Jan 20 19:59:36 crc kubenswrapper[4948]: I0120 19:59:36.323176 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:36 crc kubenswrapper[4948]: I0120 19:59:36.323239 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:36 crc kubenswrapper[4948]: I0120 19:59:36.327983 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:36 crc kubenswrapper[4948]: I0120 19:59:36.526305 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-568fd6f89f-fcgm2" Jan 20 19:59:36 crc kubenswrapper[4948]: I0120 19:59:36.584233 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-lxvjj"] Jan 20 19:59:40 crc kubenswrapper[4948]: I0120 19:59:40.536379 4948 scope.go:117] "RemoveContainer" containerID="78733da8e436856ad89bc8e5fe0dc5db88ece6739df841ddd4e3c6fa7001a80b" Jan 20 19:59:46 crc kubenswrapper[4948]: I0120 19:59:46.485358 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-6lt8c" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.188570 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w"] Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.190027 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.193206 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.193573 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.208936 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w"] Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.216013 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-secret-volume\") pod \"collect-profiles-29482320-96r5w\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.216063 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-config-volume\") pod \"collect-profiles-29482320-96r5w\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.216102 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz4fl\" (UniqueName: \"kubernetes.io/projected/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-kube-api-access-rz4fl\") pod \"collect-profiles-29482320-96r5w\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.317153 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-secret-volume\") pod \"collect-profiles-29482320-96r5w\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.317227 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-config-volume\") pod \"collect-profiles-29482320-96r5w\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.317282 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz4fl\" (UniqueName: \"kubernetes.io/projected/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-kube-api-access-rz4fl\") pod \"collect-profiles-29482320-96r5w\" (UID: 
\"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.318895 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-config-volume\") pod \"collect-profiles-29482320-96r5w\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.338164 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-secret-volume\") pod \"collect-profiles-29482320-96r5w\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.342545 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz4fl\" (UniqueName: \"kubernetes.io/projected/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-kube-api-access-rz4fl\") pod \"collect-profiles-29482320-96r5w\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.513255 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:00 crc kubenswrapper[4948]: I0120 20:00:00.871427 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w"] Jan 20 20:00:01 crc kubenswrapper[4948]: I0120 20:00:01.642887 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-lxvjj" podUID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" containerName="console" containerID="cri-o://77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf" gracePeriod=15 Jan 20 20:00:01 crc kubenswrapper[4948]: I0120 20:00:01.671558 4948 generic.go:334] "Generic (PLEG): container finished" podID="0573d7c9-3516-40cd-a9f5-3f8e99ad8c39" containerID="2900eadc7a9ab5d06018d0b68d33bfa089181e42e6002569f96e04453237ae78" exitCode=0 Jan 20 20:00:01 crc kubenswrapper[4948]: I0120 20:00:01.671607 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" event={"ID":"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39","Type":"ContainerDied","Data":"2900eadc7a9ab5d06018d0b68d33bfa089181e42e6002569f96e04453237ae78"} Jan 20 20:00:01 crc kubenswrapper[4948]: I0120 20:00:01.671647 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" event={"ID":"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39","Type":"ContainerStarted","Data":"cbe34aac93a170adfa46fc6b65c14e761c37660c1159d1374b79cc658741f88e"} Jan 20 20:00:01 crc kubenswrapper[4948]: I0120 20:00:01.984761 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-lxvjj_fe57b94e-b773-4dc8-9a99-a2217ab4040c/console/0.log" Jan 20 20:00:01 crc kubenswrapper[4948]: I0120 20:00:01.985086 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.033156 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8"] Jan 20 20:00:02 crc kubenswrapper[4948]: E0120 20:00:02.033508 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" containerName="console" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.033528 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" containerName="console" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.033680 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" containerName="console" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.034677 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.038075 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.048122 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8"] Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.138314 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-serving-cert\") pod \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.138393 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-oauth-config\") pod \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.138474 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-trusted-ca-bundle\") pod \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.138506 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-config\") pod \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.138541 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-oauth-serving-cert\") pod \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.138570 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7g2c\" (UniqueName: \"kubernetes.io/projected/fe57b94e-b773-4dc8-9a99-a2217ab4040c-kube-api-access-z7g2c\") 
pod \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.138638 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-service-ca\") pod \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\" (UID: \"fe57b94e-b773-4dc8-9a99-a2217ab4040c\") " Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.138895 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.139288 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.139378 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz4qm\" (UniqueName: \"kubernetes.io/projected/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-kube-api-access-tz4qm\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.140213 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-service-ca" (OuterVolumeSpecName: "service-ca") pod "fe57b94e-b773-4dc8-9a99-a2217ab4040c" (UID: "fe57b94e-b773-4dc8-9a99-a2217ab4040c"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.140224 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "fe57b94e-b773-4dc8-9a99-a2217ab4040c" (UID: "fe57b94e-b773-4dc8-9a99-a2217ab4040c"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.140316 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-config" (OuterVolumeSpecName: "console-config") pod "fe57b94e-b773-4dc8-9a99-a2217ab4040c" (UID: "fe57b94e-b773-4dc8-9a99-a2217ab4040c"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.140829 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "fe57b94e-b773-4dc8-9a99-a2217ab4040c" (UID: "fe57b94e-b773-4dc8-9a99-a2217ab4040c"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.150200 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "fe57b94e-b773-4dc8-9a99-a2217ab4040c" (UID: "fe57b94e-b773-4dc8-9a99-a2217ab4040c"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.153013 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "fe57b94e-b773-4dc8-9a99-a2217ab4040c" (UID: "fe57b94e-b773-4dc8-9a99-a2217ab4040c"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.153094 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe57b94e-b773-4dc8-9a99-a2217ab4040c-kube-api-access-z7g2c" (OuterVolumeSpecName: "kube-api-access-z7g2c") pod "fe57b94e-b773-4dc8-9a99-a2217ab4040c" (UID: "fe57b94e-b773-4dc8-9a99-a2217ab4040c"). InnerVolumeSpecName "kube-api-access-z7g2c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.240599 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.240751 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.240821 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz4qm\" (UniqueName: \"kubernetes.io/projected/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-kube-api-access-tz4qm\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.240971 4948 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.240994 4948 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.241014 4948 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.241032 4948 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-console-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.241049 4948 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.241069 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7g2c\" (UniqueName: \"kubernetes.io/projected/fe57b94e-b773-4dc8-9a99-a2217ab4040c-kube-api-access-z7g2c\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.241087 4948 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe57b94e-b773-4dc8-9a99-a2217ab4040c-service-ca\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.241227 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.241518 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.263791 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz4qm\" (UniqueName: \"kubernetes.io/projected/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-kube-api-access-tz4qm\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.359273 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.625086 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8"] Jan 20 20:00:02 crc kubenswrapper[4948]: W0120 20:00:02.627455 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd79fcc60_85eb_450d_8d37_5b00b0af4ea0.slice/crio-522552c9b8aff6bc6ef251147b2ae68f37d674f5b7dba9c97a5d5a1d9afcfb65 WatchSource:0}: Error finding container 522552c9b8aff6bc6ef251147b2ae68f37d674f5b7dba9c97a5d5a1d9afcfb65: Status 404 returned error can't find the container with id 522552c9b8aff6bc6ef251147b2ae68f37d674f5b7dba9c97a5d5a1d9afcfb65 Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.707141 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" event={"ID":"d79fcc60-85eb-450d-8d37-5b00b0af4ea0","Type":"ContainerStarted","Data":"522552c9b8aff6bc6ef251147b2ae68f37d674f5b7dba9c97a5d5a1d9afcfb65"} Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.709857 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-lxvjj_fe57b94e-b773-4dc8-9a99-a2217ab4040c/console/0.log" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.709903 4948 generic.go:334] "Generic (PLEG): container finished" podID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" containerID="77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf" exitCode=2 Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.709971 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lxvjj" event={"ID":"fe57b94e-b773-4dc8-9a99-a2217ab4040c","Type":"ContainerDied","Data":"77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf"} Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.709999 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-lxvjj" 
event={"ID":"fe57b94e-b773-4dc8-9a99-a2217ab4040c","Type":"ContainerDied","Data":"26f0b10cf419ac44b9997f8537444c6b33e634e3b8c5ad4afb3a6bdad64761ad"} Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.710016 4948 scope.go:117] "RemoveContainer" containerID="77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.710019 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-lxvjj" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.729085 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-lxvjj"] Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.734779 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-lxvjj"] Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.737609 4948 scope.go:117] "RemoveContainer" containerID="77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf" Jan 20 20:00:02 crc kubenswrapper[4948]: E0120 20:00:02.738221 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf\": container with ID starting with 77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf not found: ID does not exist" containerID="77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf" Jan 20 20:00:02 crc kubenswrapper[4948]: I0120 20:00:02.738270 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf"} err="failed to get container status \"77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf\": rpc error: code = NotFound desc = could not find container \"77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf\": container with ID starting with 77c1aec8e4a3e5ba3f94c45a892bce13de3ec9b61c8ab2388a0151436b91e9bf not found: ID does not exist" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.122445 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.263755 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz4fl\" (UniqueName: \"kubernetes.io/projected/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-kube-api-access-rz4fl\") pod \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.263842 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-config-volume\") pod \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.263942 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-secret-volume\") pod \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\" (UID: \"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39\") " Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.265410 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-config-volume" (OuterVolumeSpecName: "config-volume") pod "0573d7c9-3516-40cd-a9f5-3f8e99ad8c39" (UID: "0573d7c9-3516-40cd-a9f5-3f8e99ad8c39"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.268742 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0573d7c9-3516-40cd-a9f5-3f8e99ad8c39" (UID: "0573d7c9-3516-40cd-a9f5-3f8e99ad8c39"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.269079 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-kube-api-access-rz4fl" (OuterVolumeSpecName: "kube-api-access-rz4fl") pod "0573d7c9-3516-40cd-a9f5-3f8e99ad8c39" (UID: "0573d7c9-3516-40cd-a9f5-3f8e99ad8c39"). InnerVolumeSpecName "kube-api-access-rz4fl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.365781 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.365820 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.365830 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rz4fl\" (UniqueName: \"kubernetes.io/projected/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39-kube-api-access-rz4fl\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.717566 4948 generic.go:334] "Generic (PLEG): container finished" podID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerID="38c9af3180106ad820ce252e97170ec1f033658f34ab646e468dcc8e1499907a" exitCode=0 Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.717635 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" event={"ID":"d79fcc60-85eb-450d-8d37-5b00b0af4ea0","Type":"ContainerDied","Data":"38c9af3180106ad820ce252e97170ec1f033658f34ab646e468dcc8e1499907a"} Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.723751 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" event={"ID":"0573d7c9-3516-40cd-a9f5-3f8e99ad8c39","Type":"ContainerDied","Data":"cbe34aac93a170adfa46fc6b65c14e761c37660c1159d1374b79cc658741f88e"} Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.723796 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbe34aac93a170adfa46fc6b65c14e761c37660c1159d1374b79cc658741f88e" Jan 20 20:00:03 crc kubenswrapper[4948]: I0120 20:00:03.723873 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w" Jan 20 20:00:04 crc kubenswrapper[4948]: I0120 20:00:04.576960 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe57b94e-b773-4dc8-9a99-a2217ab4040c" path="/var/lib/kubelet/pods/fe57b94e-b773-4dc8-9a99-a2217ab4040c/volumes" Jan 20 20:00:06 crc kubenswrapper[4948]: I0120 20:00:06.745094 4948 generic.go:334] "Generic (PLEG): container finished" podID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerID="9a37f19deab764cc21fe1722fbe7d355ef4a1c15bee3832cacae71fa3884bd0f" exitCode=0 Jan 20 20:00:06 crc kubenswrapper[4948]: I0120 20:00:06.745149 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" event={"ID":"d79fcc60-85eb-450d-8d37-5b00b0af4ea0","Type":"ContainerDied","Data":"9a37f19deab764cc21fe1722fbe7d355ef4a1c15bee3832cacae71fa3884bd0f"} Jan 20 20:00:07 crc kubenswrapper[4948]: I0120 20:00:07.752415 4948 generic.go:334] "Generic (PLEG): container finished" podID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerID="9ea9e3813d9876e4d4a20621bc98dba9a561c9354ca44068d25673dc0d524dc1" exitCode=0 Jan 20 20:00:07 crc kubenswrapper[4948]: I0120 20:00:07.752624 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" event={"ID":"d79fcc60-85eb-450d-8d37-5b00b0af4ea0","Type":"ContainerDied","Data":"9ea9e3813d9876e4d4a20621bc98dba9a561c9354ca44068d25673dc0d524dc1"} Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.019787 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.105020 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-util\") pod \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.105057 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-bundle\") pod \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.105083 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tz4qm\" (UniqueName: \"kubernetes.io/projected/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-kube-api-access-tz4qm\") pod \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\" (UID: \"d79fcc60-85eb-450d-8d37-5b00b0af4ea0\") " Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.107131 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-bundle" (OuterVolumeSpecName: "bundle") pod "d79fcc60-85eb-450d-8d37-5b00b0af4ea0" (UID: "d79fcc60-85eb-450d-8d37-5b00b0af4ea0"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.113940 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-kube-api-access-tz4qm" (OuterVolumeSpecName: "kube-api-access-tz4qm") pod "d79fcc60-85eb-450d-8d37-5b00b0af4ea0" (UID: "d79fcc60-85eb-450d-8d37-5b00b0af4ea0"). InnerVolumeSpecName "kube-api-access-tz4qm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.114358 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-util" (OuterVolumeSpecName: "util") pod "d79fcc60-85eb-450d-8d37-5b00b0af4ea0" (UID: "d79fcc60-85eb-450d-8d37-5b00b0af4ea0"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.207677 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-util\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.207740 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.207756 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tz4qm\" (UniqueName: \"kubernetes.io/projected/d79fcc60-85eb-450d-8d37-5b00b0af4ea0-kube-api-access-tz4qm\") on node \"crc\" DevicePath \"\"" Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.766900 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" event={"ID":"d79fcc60-85eb-450d-8d37-5b00b0af4ea0","Type":"ContainerDied","Data":"522552c9b8aff6bc6ef251147b2ae68f37d674f5b7dba9c97a5d5a1d9afcfb65"} Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.766942 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="522552c9b8aff6bc6ef251147b2ae68f37d674f5b7dba9c97a5d5a1d9afcfb65" Jan 20 20:00:09 crc kubenswrapper[4948]: I0120 20:00:09.767029 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.424529 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld"] Jan 20 20:00:21 crc kubenswrapper[4948]: E0120 20:00:21.436030 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerName="extract" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.436053 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerName="extract" Jan 20 20:00:21 crc kubenswrapper[4948]: E0120 20:00:21.436075 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerName="util" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.436083 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerName="util" Jan 20 20:00:21 crc kubenswrapper[4948]: E0120 20:00:21.436098 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerName="pull" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.436105 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerName="pull" Jan 20 20:00:21 crc kubenswrapper[4948]: E0120 20:00:21.436117 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0573d7c9-3516-40cd-a9f5-3f8e99ad8c39" containerName="collect-profiles" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.436127 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0573d7c9-3516-40cd-a9f5-3f8e99ad8c39" containerName="collect-profiles" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.436279 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0573d7c9-3516-40cd-a9f5-3f8e99ad8c39" containerName="collect-profiles" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.436310 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d79fcc60-85eb-450d-8d37-5b00b0af4ea0" containerName="extract" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.436774 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.442388 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.442744 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.442476 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.442654 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.443093 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-s59q6" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.469827 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld"] Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.627797 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-webhook-cert\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.627895 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlng4\" (UniqueName: \"kubernetes.io/projected/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-kube-api-access-wlng4\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.629276 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-apiservice-cert\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.730179 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-apiservice-cert\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.730230 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-webhook-cert\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.730284 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlng4\" (UniqueName: \"kubernetes.io/projected/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-kube-api-access-wlng4\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.736441 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-webhook-cert\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.750069 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-apiservice-cert\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.756518 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlng4\" (UniqueName: \"kubernetes.io/projected/a422b9d2-2fe8-485a-a7c7-fb0fa96706c9-kube-api-access-wlng4\") pod \"metallb-operator-controller-manager-7998c69bcc-rkwld\" (UID: \"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9\") " pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.777886 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-989f8776d-mst22"] Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.778584 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.782026 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.782169 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.782182 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-swvgf" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.801433 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-989f8776d-mst22"] Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.823918 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.946675 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-webhook-cert\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.946817 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-apiservice-cert\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:21 crc kubenswrapper[4948]: I0120 20:00:21.946845 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgxbx\" (UniqueName: \"kubernetes.io/projected/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-kube-api-access-cgxbx\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.048027 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-webhook-cert\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.048134 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-apiservice-cert\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.048185 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgxbx\" (UniqueName: \"kubernetes.io/projected/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-kube-api-access-cgxbx\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.059167 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-webhook-cert\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.070152 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-apiservice-cert\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " 
pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.179865 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgxbx\" (UniqueName: \"kubernetes.io/projected/3eb6ce14-f5fb-4e93-8f16-d4b0eec67237-kube-api-access-cgxbx\") pod \"metallb-operator-webhook-server-989f8776d-mst22\" (UID: \"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237\") " pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.408279 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.653449 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld"] Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.879194 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" event={"ID":"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9","Type":"ContainerStarted","Data":"8345bbf84ff65a8b5872f505e60cccf9b026b7b158e0d5c0ec4f94eebf727914"} Jan 20 20:00:22 crc kubenswrapper[4948]: I0120 20:00:22.887083 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-989f8776d-mst22"] Jan 20 20:00:23 crc kubenswrapper[4948]: I0120 20:00:23.885076 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" event={"ID":"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237","Type":"ContainerStarted","Data":"b5cda0475a0b053d8032d1265a954874d89fa6f0eae1fbc97ec17540baa33cc8"} Jan 20 20:00:29 crc kubenswrapper[4948]: I0120 20:00:29.959502 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" event={"ID":"a422b9d2-2fe8-485a-a7c7-fb0fa96706c9","Type":"ContainerStarted","Data":"d3d4026b1a910adec4b12ca0bca5f987c8665f4f1a804874f0043e99a86ac934"} Jan 20 20:00:29 crc kubenswrapper[4948]: I0120 20:00:29.960167 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:00:30 crc kubenswrapper[4948]: I0120 20:00:30.009941 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" podStartSLOduration=2.417350617 podStartE2EDuration="9.009903614s" podCreationTimestamp="2026-01-20 20:00:21 +0000 UTC" firstStartedPulling="2026-01-20 20:00:22.675137182 +0000 UTC m=+650.625862151" lastFinishedPulling="2026-01-20 20:00:29.267690179 +0000 UTC m=+657.218415148" observedRunningTime="2026-01-20 20:00:29.996848386 +0000 UTC m=+657.947573355" watchObservedRunningTime="2026-01-20 20:00:30.009903614 +0000 UTC m=+657.960628583" Jan 20 20:00:33 crc kubenswrapper[4948]: I0120 20:00:33.984678 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" event={"ID":"3eb6ce14-f5fb-4e93-8f16-d4b0eec67237","Type":"ContainerStarted","Data":"9312e6a00673165b091d4db6307e2a17d8c79c4542ba1bc8c5a48ea5ae777485"} Jan 20 20:00:33 crc kubenswrapper[4948]: I0120 20:00:33.985366 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:00:34 crc 
kubenswrapper[4948]: I0120 20:00:34.011373 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" podStartSLOduration=2.947634409 podStartE2EDuration="13.011356726s" podCreationTimestamp="2026-01-20 20:00:21 +0000 UTC" firstStartedPulling="2026-01-20 20:00:22.898942385 +0000 UTC m=+650.849667354" lastFinishedPulling="2026-01-20 20:00:32.962664702 +0000 UTC m=+660.913389671" observedRunningTime="2026-01-20 20:00:34.006814238 +0000 UTC m=+661.957539217" watchObservedRunningTime="2026-01-20 20:00:34.011356726 +0000 UTC m=+661.962081695" Jan 20 20:00:52 crc kubenswrapper[4948]: I0120 20:00:52.418931 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-989f8776d-mst22" Jan 20 20:01:01 crc kubenswrapper[4948]: I0120 20:01:01.827030 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7998c69bcc-rkwld" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.538949 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-khbv6"] Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.542023 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.544653 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.544742 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-tk29s" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.544841 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.547237 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc"] Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.548132 4948 util.go:30] "No sandbox for pod can be found. 
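The two "Observed pod startup duration" entries above encode a simple relation worth making explicit: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to be that end-to-end time minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A quick check in Go against the controller-manager entry follows; the values are copied from the log, while the subtraction rule itself is an inference from these numbers, not a statement of kubelet internals.

package main

import "fmt"

func main() {
	// Fractional seconds from the metallb-operator-controller-manager-7998c69bcc-rkwld
	// entry above (all timestamps fall in the same minute-aligned window).
	e2e := 9.009903614                  // podStartE2EDuration
	pull := 29.267690179 - 22.675137182 // lastFinishedPulling - firstStartedPulling
	// Prints approximately 2.417350617, matching podStartSLOduration in the log.
	fmt.Printf("e2e - pull = %.9f\n", e2e-pull)
}

The webhook-server entry checks out the same way: 13.011356726 - (32.962664702 - 22.898942385) = 2.947634409, its reported podStartSLOduration.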
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.549699 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.563342 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc"] Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.646421 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgznm\" (UniqueName: \"kubernetes.io/projected/2f322a0b-2e68-429d-b734-c7e20e346a47-kube-api-access-zgznm\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.646511 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-sockets\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.646540 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-conf\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.646555 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-startup\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.646664 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.646748 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06d4b8b1-3c5f-4736-9492-bc33db43f510-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-mxgmc\" (UID: \"06d4b8b1-3c5f-4736-9492-bc33db43f510\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.646771 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics-certs\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.646795 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-reloader\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 
20:01:02.646827 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7gd6\" (UniqueName: \"kubernetes.io/projected/06d4b8b1-3c5f-4736-9492-bc33db43f510-kube-api-access-p7gd6\") pod \"frr-k8s-webhook-server-7df86c4f6c-mxgmc\" (UID: \"06d4b8b1-3c5f-4736-9492-bc33db43f510\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.696168 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-fl6v6"] Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.697076 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: W0120 20:01:02.701486 4948 reflector.go:561] object-"metallb-system"/"metallb-memberlist": failed to list *v1.Secret: secrets "metallb-memberlist" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object Jan 20 20:01:02 crc kubenswrapper[4948]: E0120 20:01:02.701785 4948 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"metallb-memberlist\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"metallb-memberlist\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.703675 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.703974 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.704176 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-4sk2s" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.719624 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-q4qhx"] Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.720639 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.724183 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.739590 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-q4qhx"] Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.747958 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748157 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06d4b8b1-3c5f-4736-9492-bc33db43f510-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-mxgmc\" (UID: \"06d4b8b1-3c5f-4736-9492-bc33db43f510\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748247 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics-certs\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748321 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-reloader\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748388 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7gd6\" (UniqueName: \"kubernetes.io/projected/06d4b8b1-3c5f-4736-9492-bc33db43f510-kube-api-access-p7gd6\") pod \"frr-k8s-webhook-server-7df86c4f6c-mxgmc\" (UID: \"06d4b8b1-3c5f-4736-9492-bc33db43f510\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748466 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748485 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgznm\" (UniqueName: \"kubernetes.io/projected/2f322a0b-2e68-429d-b734-c7e20e346a47-kube-api-access-zgznm\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748611 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-sockets\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748680 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: 
\"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-conf\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748789 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-startup\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.748914 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-reloader\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: E0120 20:01:02.748998 4948 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 20 20:01:02 crc kubenswrapper[4948]: E0120 20:01:02.749042 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/06d4b8b1-3c5f-4736-9492-bc33db43f510-cert podName:06d4b8b1-3c5f-4736-9492-bc33db43f510 nodeName:}" failed. No retries permitted until 2026-01-20 20:01:03.249027791 +0000 UTC m=+691.199752760 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/06d4b8b1-3c5f-4736-9492-bc33db43f510-cert") pod "frr-k8s-webhook-server-7df86c4f6c-mxgmc" (UID: "06d4b8b1-3c5f-4736-9492-bc33db43f510") : secret "frr-k8s-webhook-server-cert" not found Jan 20 20:01:02 crc kubenswrapper[4948]: E0120 20:01:02.749252 4948 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 20 20:01:02 crc kubenswrapper[4948]: E0120 20:01:02.749282 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics-certs podName:2f322a0b-2e68-429d-b734-c7e20e346a47 nodeName:}" failed. No retries permitted until 2026-01-20 20:01:03.249275638 +0000 UTC m=+691.200000607 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics-certs") pod "frr-k8s-khbv6" (UID: "2f322a0b-2e68-429d-b734-c7e20e346a47") : secret "frr-k8s-certs-secret" not found Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.749732 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-sockets\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.749927 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-conf\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.750164 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2f322a0b-2e68-429d-b734-c7e20e346a47-frr-startup\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.774094 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7gd6\" (UniqueName: \"kubernetes.io/projected/06d4b8b1-3c5f-4736-9492-bc33db43f510-kube-api-access-p7gd6\") pod \"frr-k8s-webhook-server-7df86c4f6c-mxgmc\" (UID: \"06d4b8b1-3c5f-4736-9492-bc33db43f510\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.798407 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgznm\" (UniqueName: \"kubernetes.io/projected/2f322a0b-2e68-429d-b734-c7e20e346a47-kube-api-access-zgznm\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.850312 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/04d1e8ae-e88d-4357-87c8-c15899e9ce23-metrics-certs\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.850448 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/04d1e8ae-e88d-4357-87c8-c15899e9ce23-cert\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.850541 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcrch\" (UniqueName: \"kubernetes.io/projected/04d1e8ae-e88d-4357-87c8-c15899e9ce23-kube-api-access-mcrch\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.850625 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-metrics-certs\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.850683 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/9a99fce2-43d3-43f4-bada-ca2b9f94673c-metallb-excludel2\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.850736 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.850765 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9l2w\" (UniqueName: \"kubernetes.io/projected/9a99fce2-43d3-43f4-bada-ca2b9f94673c-kube-api-access-s9l2w\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.952646 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/04d1e8ae-e88d-4357-87c8-c15899e9ce23-metrics-certs\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.953218 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/04d1e8ae-e88d-4357-87c8-c15899e9ce23-cert\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.953312 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcrch\" (UniqueName: \"kubernetes.io/projected/04d1e8ae-e88d-4357-87c8-c15899e9ce23-kube-api-access-mcrch\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.953534 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-metrics-certs\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.953646 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/9a99fce2-43d3-43f4-bada-ca2b9f94673c-metallb-excludel2\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.953742 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " 
pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.953829 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9l2w\" (UniqueName: \"kubernetes.io/projected/9a99fce2-43d3-43f4-bada-ca2b9f94673c-kube-api-access-s9l2w\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.956166 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/9a99fce2-43d3-43f4-bada-ca2b9f94673c-metallb-excludel2\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.957915 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.958320 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/04d1e8ae-e88d-4357-87c8-c15899e9ce23-metrics-certs\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.958564 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-metrics-certs\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.969128 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/04d1e8ae-e88d-4357-87c8-c15899e9ce23-cert\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.975598 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcrch\" (UniqueName: \"kubernetes.io/projected/04d1e8ae-e88d-4357-87c8-c15899e9ce23-kube-api-access-mcrch\") pod \"controller-6968d8fdc4-q4qhx\" (UID: \"04d1e8ae-e88d-4357-87c8-c15899e9ce23\") " pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:02 crc kubenswrapper[4948]: I0120 20:01:02.978526 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9l2w\" (UniqueName: \"kubernetes.io/projected/9a99fce2-43d3-43f4-bada-ca2b9f94673c-kube-api-access-s9l2w\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.034752 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.257336 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06d4b8b1-3c5f-4736-9492-bc33db43f510-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-mxgmc\" (UID: \"06d4b8b1-3c5f-4736-9492-bc33db43f510\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.257389 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics-certs\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.271307 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2f322a0b-2e68-429d-b734-c7e20e346a47-metrics-certs\") pod \"frr-k8s-khbv6\" (UID: \"2f322a0b-2e68-429d-b734-c7e20e346a47\") " pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.271392 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/06d4b8b1-3c5f-4736-9492-bc33db43f510-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-mxgmc\" (UID: \"06d4b8b1-3c5f-4736-9492-bc33db43f510\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.297050 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-q4qhx"] Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.463933 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.476685 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:03 crc kubenswrapper[4948]: I0120 20:01:03.705847 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc"] Jan 20 20:01:03 crc kubenswrapper[4948]: W0120 20:01:03.709849 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06d4b8b1_3c5f_4736_9492_bc33db43f510.slice/crio-563ef01e2caaa5ece273ab7f9c6d2690c3675dd181090922b8aea6ace5b9ffd1 WatchSource:0}: Error finding container 563ef01e2caaa5ece273ab7f9c6d2690c3675dd181090922b8aea6ace5b9ffd1: Status 404 returned error can't find the container with id 563ef01e2caaa5ece273ab7f9c6d2690c3675dd181090922b8aea6ace5b9ffd1 Jan 20 20:01:03 crc kubenswrapper[4948]: E0120 20:01:03.955299 4948 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: failed to sync secret cache: timed out waiting for the condition Jan 20 20:01:03 crc kubenswrapper[4948]: E0120 20:01:03.955426 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist podName:9a99fce2-43d3-43f4-bada-ca2b9f94673c nodeName:}" failed. No retries permitted until 2026-01-20 20:01:04.455404341 +0000 UTC m=+692.406129310 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist") pod "speaker-fl6v6" (UID: "9a99fce2-43d3-43f4-bada-ca2b9f94673c") : failed to sync secret cache: timed out waiting for the condition Jan 20 20:01:04 crc kubenswrapper[4948]: I0120 20:01:04.194143 4948 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 20 20:01:04 crc kubenswrapper[4948]: I0120 20:01:04.306323 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" event={"ID":"06d4b8b1-3c5f-4736-9492-bc33db43f510","Type":"ContainerStarted","Data":"563ef01e2caaa5ece273ab7f9c6d2690c3675dd181090922b8aea6ace5b9ffd1"} Jan 20 20:01:04 crc kubenswrapper[4948]: I0120 20:01:04.307465 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-q4qhx" event={"ID":"04d1e8ae-e88d-4357-87c8-c15899e9ce23","Type":"ContainerStarted","Data":"f71e97c495a22169ad2e21488128fdce065fbe1e6f16192d93060acd1e5f5b7c"} Jan 20 20:01:04 crc kubenswrapper[4948]: I0120 20:01:04.478998 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:04 crc kubenswrapper[4948]: E0120 20:01:04.479157 4948 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 20 20:01:04 crc kubenswrapper[4948]: E0120 20:01:04.479228 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist podName:9a99fce2-43d3-43f4-bada-ca2b9f94673c nodeName:}" failed. No retries permitted until 2026-01-20 20:01:05.479212953 +0000 UTC m=+693.429937922 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist") pod "speaker-fl6v6" (UID: "9a99fce2-43d3-43f4-bada-ca2b9f94673c") : secret "metallb-memberlist" not found Jan 20 20:01:05 crc kubenswrapper[4948]: I0120 20:01:05.318917 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-q4qhx" event={"ID":"04d1e8ae-e88d-4357-87c8-c15899e9ce23","Type":"ContainerStarted","Data":"66de442d711b29544d58ddeb1999a0185f9aa67fe837412380d8cff358448dd0"} Jan 20 20:01:05 crc kubenswrapper[4948]: I0120 20:01:05.319414 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:05 crc kubenswrapper[4948]: I0120 20:01:05.319428 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-q4qhx" event={"ID":"04d1e8ae-e88d-4357-87c8-c15899e9ce23","Type":"ContainerStarted","Data":"e4e5203551fb5142a809edd5541d6ecde58a1e22d5d7538e9a084f003cbb7b55"} Jan 20 20:01:05 crc kubenswrapper[4948]: I0120 20:01:05.320695 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerStarted","Data":"4d17a76fcc0f149cb3bb5046036359917a763cc114524c04fff3c19b5d957b54"} Jan 20 20:01:05 crc kubenswrapper[4948]: I0120 20:01:05.352584 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-q4qhx" podStartSLOduration=3.352549896 podStartE2EDuration="3.352549896s" podCreationTimestamp="2026-01-20 20:01:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:01:05.347296578 +0000 UTC m=+693.298021567" watchObservedRunningTime="2026-01-20 20:01:05.352549896 +0000 UTC m=+693.303274865" Jan 20 20:01:05 crc kubenswrapper[4948]: I0120 20:01:05.496874 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:05 crc kubenswrapper[4948]: I0120 20:01:05.519008 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/9a99fce2-43d3-43f4-bada-ca2b9f94673c-memberlist\") pod \"speaker-fl6v6\" (UID: \"9a99fce2-43d3-43f4-bada-ca2b9f94673c\") " pod="metallb-system/speaker-fl6v6" Jan 20 20:01:05 crc kubenswrapper[4948]: I0120 20:01:05.714818 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-fl6v6" Jan 20 20:01:05 crc kubenswrapper[4948]: W0120 20:01:05.741155 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9a99fce2_43d3_43f4_bada_ca2b9f94673c.slice/crio-cfa012df204d44b11ec045e75aa272429d56cf2ead7fe9baae241beb85683b7d WatchSource:0}: Error finding container cfa012df204d44b11ec045e75aa272429d56cf2ead7fe9baae241beb85683b7d: Status 404 returned error can't find the container with id cfa012df204d44b11ec045e75aa272429d56cf2ead7fe9baae241beb85683b7d Jan 20 20:01:06 crc kubenswrapper[4948]: I0120 20:01:06.338090 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fl6v6" event={"ID":"9a99fce2-43d3-43f4-bada-ca2b9f94673c","Type":"ContainerStarted","Data":"cfa012df204d44b11ec045e75aa272429d56cf2ead7fe9baae241beb85683b7d"} Jan 20 20:01:07 crc kubenswrapper[4948]: I0120 20:01:07.367759 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fl6v6" event={"ID":"9a99fce2-43d3-43f4-bada-ca2b9f94673c","Type":"ContainerStarted","Data":"6368c698368986cf0f5e95830b0189519d138ea5920c2428586bcaeefe670d4f"} Jan 20 20:01:07 crc kubenswrapper[4948]: I0120 20:01:07.368209 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fl6v6" event={"ID":"9a99fce2-43d3-43f4-bada-ca2b9f94673c","Type":"ContainerStarted","Data":"4548432091d03003cc9252f618dbe09e9964f084c83331c91bfd14766fc44045"} Jan 20 20:01:07 crc kubenswrapper[4948]: I0120 20:01:07.368517 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-fl6v6" Jan 20 20:01:07 crc kubenswrapper[4948]: I0120 20:01:07.419487 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-fl6v6" podStartSLOduration=5.419465758 podStartE2EDuration="5.419465758s" podCreationTimestamp="2026-01-20 20:01:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:01:07.414809707 +0000 UTC m=+695.365534676" watchObservedRunningTime="2026-01-20 20:01:07.419465758 +0000 UTC m=+695.370190727" Jan 20 20:01:19 crc kubenswrapper[4948]: I0120 20:01:19.463832 4948 generic.go:334] "Generic (PLEG): container finished" podID="2f322a0b-2e68-429d-b734-c7e20e346a47" containerID="252ed321b253bb857d504170e1ae2b4e5a01b05857467037939b137df7c75a0e" exitCode=0 Jan 20 20:01:19 crc kubenswrapper[4948]: I0120 20:01:19.463889 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerDied","Data":"252ed321b253bb857d504170e1ae2b4e5a01b05857467037939b137df7c75a0e"} Jan 20 20:01:19 crc kubenswrapper[4948]: I0120 20:01:19.465803 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" event={"ID":"06d4b8b1-3c5f-4736-9492-bc33db43f510","Type":"ContainerStarted","Data":"ead428c2a5a7d9d2560106e878dd87ed9c8e55e25375e1ed0ea7afe5d2ac057a"} Jan 20 20:01:19 crc kubenswrapper[4948]: I0120 20:01:19.465958 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:20 crc kubenswrapper[4948]: I0120 20:01:20.249843 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:01:20 crc kubenswrapper[4948]: I0120 20:01:20.250206 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:01:20 crc kubenswrapper[4948]: I0120 20:01:20.474077 4948 generic.go:334] "Generic (PLEG): container finished" podID="2f322a0b-2e68-429d-b734-c7e20e346a47" containerID="084f0d86dfd0439c94541966e5b4704a1ac4f85c997236faf0d246f192aab001" exitCode=0 Jan 20 20:01:20 crc kubenswrapper[4948]: I0120 20:01:20.474171 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerDied","Data":"084f0d86dfd0439c94541966e5b4704a1ac4f85c997236faf0d246f192aab001"} Jan 20 20:01:20 crc kubenswrapper[4948]: I0120 20:01:20.514793 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" podStartSLOduration=3.261970767 podStartE2EDuration="18.514774467s" podCreationTimestamp="2026-01-20 20:01:02 +0000 UTC" firstStartedPulling="2026-01-20 20:01:03.711765118 +0000 UTC m=+691.662490087" lastFinishedPulling="2026-01-20 20:01:18.964568818 +0000 UTC m=+706.915293787" observedRunningTime="2026-01-20 20:01:19.532412638 +0000 UTC m=+707.483137637" watchObservedRunningTime="2026-01-20 20:01:20.514774467 +0000 UTC m=+708.465499446" Jan 20 20:01:21 crc kubenswrapper[4948]: I0120 20:01:21.486595 4948 generic.go:334] "Generic (PLEG): container finished" podID="2f322a0b-2e68-429d-b734-c7e20e346a47" containerID="a8d84f8f05c767e404619a212ce3e5757851f760f33c2fc62a733d28f6bfde5a" exitCode=0 Jan 20 20:01:21 crc kubenswrapper[4948]: I0120 20:01:21.489916 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerDied","Data":"a8d84f8f05c767e404619a212ce3e5757851f760f33c2fc62a733d28f6bfde5a"} Jan 20 20:01:22 crc kubenswrapper[4948]: I0120 20:01:22.503990 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerStarted","Data":"425932a443baa797ffb6e6ef9cfd8e87c49275f68971cd06d57662b1bec4af14"} Jan 20 20:01:22 crc kubenswrapper[4948]: I0120 20:01:22.504551 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerStarted","Data":"d484b519564547caff00be28ee634e45c41400e9f62d8adfdb17f3f072bb9c42"} Jan 20 20:01:22 crc kubenswrapper[4948]: I0120 20:01:22.504569 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerStarted","Data":"8511c5ad925b841cbed1f7293742bc158130b3928eef68c1daa7371e5e5bab00"} Jan 20 20:01:22 crc kubenswrapper[4948]: I0120 20:01:22.504581 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerStarted","Data":"806e4924239a9d4c130639b59f3459a873a323d8e1984ea3dc54670eb461f56d"} Jan 20 20:01:22 crc kubenswrapper[4948]: I0120 
20:01:22.504589 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerStarted","Data":"3b2ac036aee129b029daf75050316b57cc42ccf4574d3b6427968eaf38b8bc42"} Jan 20 20:01:23 crc kubenswrapper[4948]: I0120 20:01:23.039079 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-q4qhx" Jan 20 20:01:23 crc kubenswrapper[4948]: I0120 20:01:23.520982 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-khbv6" event={"ID":"2f322a0b-2e68-429d-b734-c7e20e346a47","Type":"ContainerStarted","Data":"67b371589ba1b90945d14afe2911ea47ea4388857b52eb7de14749f3606fb583"} Jan 20 20:01:23 crc kubenswrapper[4948]: I0120 20:01:23.521257 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:25 crc kubenswrapper[4948]: I0120 20:01:25.719036 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-fl6v6" Jan 20 20:01:25 crc kubenswrapper[4948]: I0120 20:01:25.739662 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-khbv6" podStartSLOduration=9.274146692 podStartE2EDuration="23.739643318s" podCreationTimestamp="2026-01-20 20:01:02 +0000 UTC" firstStartedPulling="2026-01-20 20:01:04.521956465 +0000 UTC m=+692.472681434" lastFinishedPulling="2026-01-20 20:01:18.987453091 +0000 UTC m=+706.938178060" observedRunningTime="2026-01-20 20:01:23.557899287 +0000 UTC m=+711.508624256" watchObservedRunningTime="2026-01-20 20:01:25.739643318 +0000 UTC m=+713.690368287" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.464800 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.552340 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.579783 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-fckw5"] Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.580826 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.591018 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.591355 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-72bqc" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.592037 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.606626 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-fckw5"] Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.638815 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgf9v\" (UniqueName: \"kubernetes.io/projected/e98fafb2-a9ef-4252-a236-be3c009d42b2-kube-api-access-sgf9v\") pod \"openstack-operator-index-fckw5\" (UID: \"e98fafb2-a9ef-4252-a236-be3c009d42b2\") " pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.740680 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgf9v\" (UniqueName: \"kubernetes.io/projected/e98fafb2-a9ef-4252-a236-be3c009d42b2-kube-api-access-sgf9v\") pod \"openstack-operator-index-fckw5\" (UID: \"e98fafb2-a9ef-4252-a236-be3c009d42b2\") " pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.760502 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgf9v\" (UniqueName: \"kubernetes.io/projected/e98fafb2-a9ef-4252-a236-be3c009d42b2-kube-api-access-sgf9v\") pod \"openstack-operator-index-fckw5\" (UID: \"e98fafb2-a9ef-4252-a236-be3c009d42b2\") " pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:28 crc kubenswrapper[4948]: I0120 20:01:28.905226 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:29 crc kubenswrapper[4948]: I0120 20:01:29.201033 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-fckw5"] Jan 20 20:01:29 crc kubenswrapper[4948]: I0120 20:01:29.558357 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fckw5" event={"ID":"e98fafb2-a9ef-4252-a236-be3c009d42b2","Type":"ContainerStarted","Data":"5552650ba41601ea44105030e2fee487fa6e9a6ba8d4b1c9408d48a3fd718b13"} Jan 20 20:01:31 crc kubenswrapper[4948]: I0120 20:01:31.572321 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-fckw5" event={"ID":"e98fafb2-a9ef-4252-a236-be3c009d42b2","Type":"ContainerStarted","Data":"857a1db03e8c20811bd4dbdd1b1331b46fd0ba4be0c20580d2372de6c921a72d"} Jan 20 20:01:31 crc kubenswrapper[4948]: I0120 20:01:31.590108 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-fckw5" podStartSLOduration=1.371111232 podStartE2EDuration="3.590089485s" podCreationTimestamp="2026-01-20 20:01:28 +0000 UTC" firstStartedPulling="2026-01-20 20:01:29.200425272 +0000 UTC m=+717.151150231" lastFinishedPulling="2026-01-20 20:01:31.419403515 +0000 UTC m=+719.370128484" observedRunningTime="2026-01-20 20:01:31.585420993 +0000 UTC m=+719.536145962" watchObservedRunningTime="2026-01-20 20:01:31.590089485 +0000 UTC m=+719.540814454" Jan 20 20:01:33 crc kubenswrapper[4948]: I0120 20:01:33.467102 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-khbv6" Jan 20 20:01:33 crc kubenswrapper[4948]: I0120 20:01:33.492016 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-mxgmc" Jan 20 20:01:36 crc kubenswrapper[4948]: I0120 20:01:36.926136 4948 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 20 20:01:38 crc kubenswrapper[4948]: I0120 20:01:38.963631 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:38 crc kubenswrapper[4948]: I0120 20:01:38.963767 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:38 crc kubenswrapper[4948]: I0120 20:01:38.991460 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:39 crc kubenswrapper[4948]: I0120 20:01:39.657657 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-fckw5" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.373625 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8"] Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.375790 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.382892 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8"] Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.386129 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-n262w" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.523768 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-util\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.523857 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-bundle\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.523910 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spdg4\" (UniqueName: \"kubernetes.io/projected/349488b0-c355-4358-8fb2-1979301298a1-kube-api-access-spdg4\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.624988 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spdg4\" (UniqueName: \"kubernetes.io/projected/349488b0-c355-4358-8fb2-1979301298a1-kube-api-access-spdg4\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.625456 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-util\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.625962 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-util\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.626121 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-bundle\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.626397 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-bundle\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.645819 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spdg4\" (UniqueName: \"kubernetes.io/projected/349488b0-c355-4358-8fb2-1979301298a1-kube-api-access-spdg4\") pod \"a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:46 crc kubenswrapper[4948]: I0120 20:01:46.734014 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:47 crc kubenswrapper[4948]: I0120 20:01:47.182016 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8"] Jan 20 20:01:47 crc kubenswrapper[4948]: I0120 20:01:47.675220 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" event={"ID":"349488b0-c355-4358-8fb2-1979301298a1","Type":"ContainerStarted","Data":"9e125c8cde8bbebae35ca47f2217463eb728863ec93416f65c8fa814f6899c5d"} Jan 20 20:01:47 crc kubenswrapper[4948]: I0120 20:01:47.675541 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" event={"ID":"349488b0-c355-4358-8fb2-1979301298a1","Type":"ContainerStarted","Data":"5b16946d0058495347b6904e1463b6581d27e09c6935d48788e0e38b67fed395"} Jan 20 20:01:48 crc kubenswrapper[4948]: I0120 20:01:48.687354 4948 generic.go:334] "Generic (PLEG): container finished" podID="349488b0-c355-4358-8fb2-1979301298a1" containerID="9e125c8cde8bbebae35ca47f2217463eb728863ec93416f65c8fa814f6899c5d" exitCode=0 Jan 20 20:01:48 crc kubenswrapper[4948]: I0120 20:01:48.687418 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" event={"ID":"349488b0-c355-4358-8fb2-1979301298a1","Type":"ContainerDied","Data":"9e125c8cde8bbebae35ca47f2217463eb728863ec93416f65c8fa814f6899c5d"} Jan 20 20:01:49 crc kubenswrapper[4948]: I0120 20:01:49.694546 4948 generic.go:334] "Generic (PLEG): container finished" podID="349488b0-c355-4358-8fb2-1979301298a1" containerID="f26f72598ce5fb3320b9a6bbd9b7ebe81b2a921aac65b2c4b959dba654591e0d" exitCode=0 Jan 20 20:01:49 crc kubenswrapper[4948]: I0120 20:01:49.694585 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" 
event={"ID":"349488b0-c355-4358-8fb2-1979301298a1","Type":"ContainerDied","Data":"f26f72598ce5fb3320b9a6bbd9b7ebe81b2a921aac65b2c4b959dba654591e0d"} Jan 20 20:01:50 crc kubenswrapper[4948]: I0120 20:01:50.262651 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:01:50 crc kubenswrapper[4948]: I0120 20:01:50.262717 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:01:50 crc kubenswrapper[4948]: I0120 20:01:50.720230 4948 generic.go:334] "Generic (PLEG): container finished" podID="349488b0-c355-4358-8fb2-1979301298a1" containerID="d4e5cf1923f62584bd0ba2137178fb8ea2d3c8506d60660dd2219eda388a8ec9" exitCode=0 Jan 20 20:01:50 crc kubenswrapper[4948]: I0120 20:01:50.720289 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" event={"ID":"349488b0-c355-4358-8fb2-1979301298a1","Type":"ContainerDied","Data":"d4e5cf1923f62584bd0ba2137178fb8ea2d3c8506d60660dd2219eda388a8ec9"} Jan 20 20:01:51 crc kubenswrapper[4948]: I0120 20:01:51.952406 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.087495 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-bundle\") pod \"349488b0-c355-4358-8fb2-1979301298a1\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.087560 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-util\") pod \"349488b0-c355-4358-8fb2-1979301298a1\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.087585 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spdg4\" (UniqueName: \"kubernetes.io/projected/349488b0-c355-4358-8fb2-1979301298a1-kube-api-access-spdg4\") pod \"349488b0-c355-4358-8fb2-1979301298a1\" (UID: \"349488b0-c355-4358-8fb2-1979301298a1\") " Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.088623 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-bundle" (OuterVolumeSpecName: "bundle") pod "349488b0-c355-4358-8fb2-1979301298a1" (UID: "349488b0-c355-4358-8fb2-1979301298a1"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.095150 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/349488b0-c355-4358-8fb2-1979301298a1-kube-api-access-spdg4" (OuterVolumeSpecName: "kube-api-access-spdg4") pod "349488b0-c355-4358-8fb2-1979301298a1" (UID: "349488b0-c355-4358-8fb2-1979301298a1"). InnerVolumeSpecName "kube-api-access-spdg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.102406 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-util" (OuterVolumeSpecName: "util") pod "349488b0-c355-4358-8fb2-1979301298a1" (UID: "349488b0-c355-4358-8fb2-1979301298a1"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.188917 4948 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.188961 4948 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/349488b0-c355-4358-8fb2-1979301298a1-util\") on node \"crc\" DevicePath \"\"" Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.189007 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spdg4\" (UniqueName: \"kubernetes.io/projected/349488b0-c355-4358-8fb2-1979301298a1-kube-api-access-spdg4\") on node \"crc\" DevicePath \"\"" Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.735470 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" event={"ID":"349488b0-c355-4358-8fb2-1979301298a1","Type":"ContainerDied","Data":"5b16946d0058495347b6904e1463b6581d27e09c6935d48788e0e38b67fed395"} Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.735755 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b16946d0058495347b6904e1463b6581d27e09c6935d48788e0e38b67fed395" Jan 20 20:01:52 crc kubenswrapper[4948]: I0120 20:01:52.735569 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.380511 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh"] Jan 20 20:01:58 crc kubenswrapper[4948]: E0120 20:01:58.381291 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349488b0-c355-4358-8fb2-1979301298a1" containerName="util" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.381305 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="349488b0-c355-4358-8fb2-1979301298a1" containerName="util" Jan 20 20:01:58 crc kubenswrapper[4948]: E0120 20:01:58.381325 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349488b0-c355-4358-8fb2-1979301298a1" containerName="extract" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.381331 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="349488b0-c355-4358-8fb2-1979301298a1" containerName="extract" Jan 20 20:01:58 crc kubenswrapper[4948]: E0120 20:01:58.381345 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349488b0-c355-4358-8fb2-1979301298a1" containerName="pull" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.381352 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="349488b0-c355-4358-8fb2-1979301298a1" containerName="pull" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.381489 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="349488b0-c355-4358-8fb2-1979301298a1" containerName="extract" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.381983 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.392591 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-w2d75" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.493083 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh"] Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.524077 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rhhl\" (UniqueName: \"kubernetes.io/projected/6d523c92-ebbc-4860-9bcc-45ef88372f2b-kube-api-access-6rhhl\") pod \"openstack-operator-controller-init-5fcf846598-7x9nh\" (UID: \"6d523c92-ebbc-4860-9bcc-45ef88372f2b\") " pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.624879 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rhhl\" (UniqueName: \"kubernetes.io/projected/6d523c92-ebbc-4860-9bcc-45ef88372f2b-kube-api-access-6rhhl\") pod \"openstack-operator-controller-init-5fcf846598-7x9nh\" (UID: \"6d523c92-ebbc-4860-9bcc-45ef88372f2b\") " pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.644258 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rhhl\" (UniqueName: \"kubernetes.io/projected/6d523c92-ebbc-4860-9bcc-45ef88372f2b-kube-api-access-6rhhl\") pod \"openstack-operator-controller-init-5fcf846598-7x9nh\" (UID: 
\"6d523c92-ebbc-4860-9bcc-45ef88372f2b\") " pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.701523 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" Jan 20 20:01:58 crc kubenswrapper[4948]: I0120 20:01:58.946142 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh"] Jan 20 20:01:58 crc kubenswrapper[4948]: W0120 20:01:58.965902 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d523c92_ebbc_4860_9bcc_45ef88372f2b.slice/crio-6d3666af05a0302d7f630f05a237590841bee2868293ec0620e65aa2b0fd9e98 WatchSource:0}: Error finding container 6d3666af05a0302d7f630f05a237590841bee2868293ec0620e65aa2b0fd9e98: Status 404 returned error can't find the container with id 6d3666af05a0302d7f630f05a237590841bee2868293ec0620e65aa2b0fd9e98 Jan 20 20:01:59 crc kubenswrapper[4948]: I0120 20:01:59.822916 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" event={"ID":"6d523c92-ebbc-4860-9bcc-45ef88372f2b","Type":"ContainerStarted","Data":"6d3666af05a0302d7f630f05a237590841bee2868293ec0620e65aa2b0fd9e98"} Jan 20 20:02:06 crc kubenswrapper[4948]: I0120 20:02:06.882118 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" event={"ID":"6d523c92-ebbc-4860-9bcc-45ef88372f2b","Type":"ContainerStarted","Data":"82ddbe635e85f1fd067306a85f3a034ea9a00b1214de784adca48114810106d5"} Jan 20 20:02:06 crc kubenswrapper[4948]: I0120 20:02:06.882742 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" Jan 20 20:02:06 crc kubenswrapper[4948]: I0120 20:02:06.947598 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" podStartSLOduration=2.028734754 podStartE2EDuration="8.947573847s" podCreationTimestamp="2026-01-20 20:01:58 +0000 UTC" firstStartedPulling="2026-01-20 20:01:58.974791461 +0000 UTC m=+746.925516430" lastFinishedPulling="2026-01-20 20:02:05.893630554 +0000 UTC m=+753.844355523" observedRunningTime="2026-01-20 20:02:06.912512626 +0000 UTC m=+754.863237605" watchObservedRunningTime="2026-01-20 20:02:06.947573847 +0000 UTC m=+754.898298816" Jan 20 20:02:18 crc kubenswrapper[4948]: I0120 20:02:18.704086 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-5fcf846598-7x9nh" Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.250314 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.250672 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.250743 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.251508 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d62e03ef00dbbeb77df97565ffab795a12284dfbc62cb77594b2a0a88f280a6c"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.251644 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://d62e03ef00dbbeb77df97565ffab795a12284dfbc62cb77594b2a0a88f280a6c" gracePeriod=600 Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.971488 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="d62e03ef00dbbeb77df97565ffab795a12284dfbc62cb77594b2a0a88f280a6c" exitCode=0 Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.971558 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"d62e03ef00dbbeb77df97565ffab795a12284dfbc62cb77594b2a0a88f280a6c"} Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.971868 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"8ea9bb8d6d2b455140d4d17b9b3ddbc16caa6ff50e9a5f66da80be0038f97979"} Jan 20 20:02:20 crc kubenswrapper[4948]: I0120 20:02:20.971896 4948 scope.go:117] "RemoveContainer" containerID="e049e149f0a0dc1b1b363bfb2d9bdbd795da8ca2d31406285050192b1751620d" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.333698 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.335361 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.340583 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-59jpp" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.342060 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.343174 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.348728 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-lt9ph" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.352834 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.354018 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.356969 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-km2z8" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.360095 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.369476 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.379302 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-795jl\" (UniqueName: \"kubernetes.io/projected/d6a36d62-a638-45c5-956a-12cb6f1ced24-kube-api-access-795jl\") pod \"cinder-operator-controller-manager-9b68f5989-2k89b\" (UID: \"d6a36d62-a638-45c5-956a-12cb6f1ced24\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.379485 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czgvn\" (UniqueName: \"kubernetes.io/projected/ef41048d-32d0-4b45-98ef-181e13e62c26-kube-api-access-czgvn\") pod \"barbican-operator-controller-manager-7ddb5c749-6vfzk\" (UID: \"ef41048d-32d0-4b45-98ef-181e13e62c26\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.379525 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqx9g\" (UniqueName: \"kubernetes.io/projected/d507465c-a0e3-494e-9e20-ef8c3517e059-kube-api-access-zqx9g\") pod \"designate-operator-controller-manager-9f958b845-6mp4q\" (UID: \"d507465c-a0e3-494e-9e20-ef8c3517e059\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.483138 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-795jl\" (UniqueName: \"kubernetes.io/projected/d6a36d62-a638-45c5-956a-12cb6f1ced24-kube-api-access-795jl\") pod \"cinder-operator-controller-manager-9b68f5989-2k89b\" (UID: \"d6a36d62-a638-45c5-956a-12cb6f1ced24\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.483601 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czgvn\" (UniqueName: \"kubernetes.io/projected/ef41048d-32d0-4b45-98ef-181e13e62c26-kube-api-access-czgvn\") pod \"barbican-operator-controller-manager-7ddb5c749-6vfzk\" 
(UID: \"ef41048d-32d0-4b45-98ef-181e13e62c26\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.483698 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqx9g\" (UniqueName: \"kubernetes.io/projected/d507465c-a0e3-494e-9e20-ef8c3517e059-kube-api-access-zqx9g\") pod \"designate-operator-controller-manager-9f958b845-6mp4q\" (UID: \"d507465c-a0e3-494e-9e20-ef8c3517e059\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.499610 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.548988 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-795jl\" (UniqueName: \"kubernetes.io/projected/d6a36d62-a638-45c5-956a-12cb6f1ced24-kube-api-access-795jl\") pod \"cinder-operator-controller-manager-9b68f5989-2k89b\" (UID: \"d6a36d62-a638-45c5-956a-12cb6f1ced24\") " pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.558293 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqx9g\" (UniqueName: \"kubernetes.io/projected/d507465c-a0e3-494e-9e20-ef8c3517e059-kube-api-access-zqx9g\") pod \"designate-operator-controller-manager-9f958b845-6mp4q\" (UID: \"d507465c-a0e3-494e-9e20-ef8c3517e059\") " pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.558849 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czgvn\" (UniqueName: \"kubernetes.io/projected/ef41048d-32d0-4b45-98ef-181e13e62c26-kube-api-access-czgvn\") pod \"barbican-operator-controller-manager-7ddb5c749-6vfzk\" (UID: \"ef41048d-32d0-4b45-98ef-181e13e62c26\") " pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.600728 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.601375 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.601484 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.608592 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-k8jxn" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.638363 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.639107 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.643189 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-hfrjp" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.657745 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.664602 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.670464 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.671259 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.674846 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-b8v67" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.675188 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.676109 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.685747 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.687082 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-p9fdf" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.687123 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.687740 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grpsp\" (UniqueName: \"kubernetes.io/projected/b78116d1-a584-49fa-ab14-86f78ce62420-kube-api-access-grpsp\") pod \"glance-operator-controller-manager-c6994669c-x9hmd\" (UID: \"b78116d1-a584-49fa-ab14-86f78ce62420\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.695360 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.697621 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.700397 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.765786 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.766793 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.774622 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.784616 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-vxg5c" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.788577 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwc99\" (UniqueName: \"kubernetes.io/projected/09ceeac6-c058-41a8-a0d6-07b4bde73893-kube-api-access-jwc99\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.788652 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pvxt\" (UniqueName: \"kubernetes.io/projected/d8461566-61e6-495d-b1ad-c0178c2eb849-kube-api-access-9pvxt\") pod \"heat-operator-controller-manager-594c8c9d5d-m8f25\" (UID: \"d8461566-61e6-495d-b1ad-c0178c2eb849\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.788686 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grpsp\" (UniqueName: \"kubernetes.io/projected/b78116d1-a584-49fa-ab14-86f78ce62420-kube-api-access-grpsp\") pod \"glance-operator-controller-manager-c6994669c-x9hmd\" (UID: \"b78116d1-a584-49fa-ab14-86f78ce62420\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.788746 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9vpr\" (UniqueName: \"kubernetes.io/projected/6f758308-6a33-4dc5-996e-beae970d4083-kube-api-access-x9vpr\") pod \"horizon-operator-controller-manager-77d5c5b54f-b7j48\" (UID: \"6f758308-6a33-4dc5-996e-beae970d4083\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.788767 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.873777 4948 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.874737 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.875889 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grpsp\" (UniqueName: \"kubernetes.io/projected/b78116d1-a584-49fa-ab14-86f78ce62420-kube-api-access-grpsp\") pod \"glance-operator-controller-manager-c6994669c-x9hmd\" (UID: \"b78116d1-a584-49fa-ab14-86f78ce62420\") " pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.882130 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-9xw9m" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.890741 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pvxt\" (UniqueName: \"kubernetes.io/projected/d8461566-61e6-495d-b1ad-c0178c2eb849-kube-api-access-9pvxt\") pod \"heat-operator-controller-manager-594c8c9d5d-m8f25\" (UID: \"d8461566-61e6-495d-b1ad-c0178c2eb849\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.890790 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9vpr\" (UniqueName: \"kubernetes.io/projected/6f758308-6a33-4dc5-996e-beae970d4083-kube-api-access-x9vpr\") pod \"horizon-operator-controller-manager-77d5c5b54f-b7j48\" (UID: \"6f758308-6a33-4dc5-996e-beae970d4083\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.890819 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.890872 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptcbj\" (UniqueName: \"kubernetes.io/projected/233a0ffe-a99e-4268-93ed-a2a20cb2c7ab-kube-api-access-ptcbj\") pod \"ironic-operator-controller-manager-78757b4889-6xdw4\" (UID: \"233a0ffe-a99e-4268-93ed-a2a20cb2c7ab\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.890918 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwc99\" (UniqueName: \"kubernetes.io/projected/09ceeac6-c058-41a8-a0d6-07b4bde73893-kube-api-access-jwc99\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:38 crc kubenswrapper[4948]: E0120 20:02:38.891487 4948 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:38 crc kubenswrapper[4948]: E0120 20:02:38.891533 4948 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert podName:09ceeac6-c058-41a8-a0d6-07b4bde73893 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:39.391516024 +0000 UTC m=+787.342240993 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert") pod "infra-operator-controller-manager-77c48c7859-xgc4z" (UID: "09ceeac6-c058-41a8-a0d6-07b4bde73893") : secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.895795 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.896787 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.926518 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-mggzx" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.944400 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.949452 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj"] Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.958405 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwc99\" (UniqueName: \"kubernetes.io/projected/09ceeac6-c058-41a8-a0d6-07b4bde73893-kube-api-access-jwc99\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:38 crc kubenswrapper[4948]: I0120 20:02:38.969971 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pvxt\" (UniqueName: \"kubernetes.io/projected/d8461566-61e6-495d-b1ad-c0178c2eb849-kube-api-access-9pvxt\") pod \"heat-operator-controller-manager-594c8c9d5d-m8f25\" (UID: \"d8461566-61e6-495d-b1ad-c0178c2eb849\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:38.994739 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2dk7\" (UniqueName: \"kubernetes.io/projected/38d63cbf-6bc2-4c48-9905-88c65334d42a-kube-api-access-r2dk7\") pod \"manila-operator-controller-manager-864f6b75bf-snszj\" (UID: \"38d63cbf-6bc2-4c48-9905-88c65334d42a\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:38.994805 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptcbj\" (UniqueName: \"kubernetes.io/projected/233a0ffe-a99e-4268-93ed-a2a20cb2c7ab-kube-api-access-ptcbj\") pod \"ironic-operator-controller-manager-78757b4889-6xdw4\" (UID: \"233a0ffe-a99e-4268-93ed-a2a20cb2c7ab\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:38.994860 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:38.994860 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29d52\" (UniqueName: \"kubernetes.io/projected/ed91900c-0efb-4184-8d92-d11fb7ae82b7-kube-api-access-29d52\") pod \"keystone-operator-controller-manager-767fdc4f47-hkwvp\" (UID: \"ed91900c-0efb-4184-8d92-d11fb7ae82b7\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.003724 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.004537 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.020281 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-nctsz"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.038027 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9vpr\" (UniqueName: \"kubernetes.io/projected/6f758308-6a33-4dc5-996e-beae970d4083-kube-api-access-x9vpr\") pod \"horizon-operator-controller-manager-77d5c5b54f-b7j48\" (UID: \"6f758308-6a33-4dc5-996e-beae970d4083\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.081087 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.086420 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptcbj\" (UniqueName: \"kubernetes.io/projected/233a0ffe-a99e-4268-93ed-a2a20cb2c7ab-kube-api-access-ptcbj\") pod \"ironic-operator-controller-manager-78757b4889-6xdw4\" (UID: \"233a0ffe-a99e-4268-93ed-a2a20cb2c7ab\") " pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.096263 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29d52\" (UniqueName: \"kubernetes.io/projected/ed91900c-0efb-4184-8d92-d11fb7ae82b7-kube-api-access-29d52\") pod \"keystone-operator-controller-manager-767fdc4f47-hkwvp\" (UID: \"ed91900c-0efb-4184-8d92-d11fb7ae82b7\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.096379 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2dk7\" (UniqueName: \"kubernetes.io/projected/38d63cbf-6bc2-4c48-9905-88c65334d42a-kube-api-access-r2dk7\") pod \"manila-operator-controller-manager-864f6b75bf-snszj\" (UID: \"38d63cbf-6bc2-4c48-9905-88c65334d42a\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.096417 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f9rh\" (UniqueName: \"kubernetes.io/projected/61ba0da3-99a5-4b43-a2fb-190260ab8f3a-kube-api-access-2f9rh\") pod \"mariadb-operator-controller-manager-c87fff755-7qmgq\" (UID: \"61ba0da3-99a5-4b43-a2fb-190260ab8f3a\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.098019 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.129766 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.157567 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.167996 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29d52\" (UniqueName: \"kubernetes.io/projected/ed91900c-0efb-4184-8d92-d11fb7ae82b7-kube-api-access-29d52\") pod \"keystone-operator-controller-manager-767fdc4f47-hkwvp\" (UID: \"ed91900c-0efb-4184-8d92-d11fb7ae82b7\") " pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.168562 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.169345 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.186081 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-vch7g"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.197201 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f9rh\" (UniqueName: \"kubernetes.io/projected/61ba0da3-99a5-4b43-a2fb-190260ab8f3a-kube-api-access-2f9rh\") pod \"mariadb-operator-controller-manager-c87fff755-7qmgq\" (UID: \"61ba0da3-99a5-4b43-a2fb-190260ab8f3a\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.197356 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2dk7\" (UniqueName: \"kubernetes.io/projected/38d63cbf-6bc2-4c48-9905-88c65334d42a-kube-api-access-r2dk7\") pod \"manila-operator-controller-manager-864f6b75bf-snszj\" (UID: \"38d63cbf-6bc2-4c48-9905-88c65334d42a\") " pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.205351 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.227479 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.248851 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.261885 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.263834 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.273452 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-5l8n6"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.278121 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.279331 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f9rh\" (UniqueName: \"kubernetes.io/projected/61ba0da3-99a5-4b43-a2fb-190260ab8f3a-kube-api-access-2f9rh\") pod \"mariadb-operator-controller-manager-c87fff755-7qmgq\" (UID: \"61ba0da3-99a5-4b43-a2fb-190260ab8f3a\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.299449 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mrlr6\" (UniqueName: \"kubernetes.io/projected/61da457f-7595-4df3-8705-e34138ec590d-kube-api-access-mrlr6\") pod \"neutron-operator-controller-manager-cb4666565-5mlm4\" (UID: \"61da457f-7595-4df3-8705-e34138ec590d\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.304696 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-phpvf"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.305646 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.319873 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.331095 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-v5nxb"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.355894 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-phpvf"]
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.408806 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z42c8\" (UniqueName: \"kubernetes.io/projected/094e4268-74c4-40e5-8f39-b6090b284c27-kube-api-access-z42c8\") pod \"nova-operator-controller-manager-65849867d6-phpvf\" (UID: \"094e4268-74c4-40e5-8f39-b6090b284c27\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf"
Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.409052 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7vmp\" (UniqueName: \"kubernetes.io/projected/d4f3075e-95f9-432a-bfcd-621b6cbe2615-kube-api-access-g7vmp\") pod \"octavia-operator-controller-manager-7fc9b76cf6-k9n27\" (UID: \"d4f3075e-95f9-432a-bfcd-621b6cbe2615\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27"
\"kube-api-access-mrlr6\" (UniqueName: \"kubernetes.io/projected/61da457f-7595-4df3-8705-e34138ec590d-kube-api-access-mrlr6\") pod \"neutron-operator-controller-manager-cb4666565-5mlm4\" (UID: \"61da457f-7595-4df3-8705-e34138ec590d\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.409211 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:39 crc kubenswrapper[4948]: E0120 20:02:39.409690 4948 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:39 crc kubenswrapper[4948]: E0120 20:02:39.409811 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert podName:09ceeac6-c058-41a8-a0d6-07b4bde73893 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:40.409788062 +0000 UTC m=+788.360513041 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert") pod "infra-operator-controller-manager-77c48c7859-xgc4z" (UID: "09ceeac6-c058-41a8-a0d6-07b4bde73893") : secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.420538 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.487283 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.489684 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.510750 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mrlr6\" (UniqueName: \"kubernetes.io/projected/61da457f-7595-4df3-8705-e34138ec590d-kube-api-access-mrlr6\") pod \"neutron-operator-controller-manager-cb4666565-5mlm4\" (UID: \"61da457f-7595-4df3-8705-e34138ec590d\") " pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.511891 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z42c8\" (UniqueName: \"kubernetes.io/projected/094e4268-74c4-40e5-8f39-b6090b284c27-kube-api-access-z42c8\") pod \"nova-operator-controller-manager-65849867d6-phpvf\" (UID: \"094e4268-74c4-40e5-8f39-b6090b284c27\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.511945 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7vmp\" (UniqueName: \"kubernetes.io/projected/d4f3075e-95f9-432a-bfcd-621b6cbe2615-kube-api-access-g7vmp\") pod \"octavia-operator-controller-manager-7fc9b76cf6-k9n27\" (UID: \"d4f3075e-95f9-432a-bfcd-621b6cbe2615\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.515564 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.521314 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-4hhwc" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.521900 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.529617 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.556890 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.561007 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-25f2q" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.586393 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z42c8\" (UniqueName: \"kubernetes.io/projected/094e4268-74c4-40e5-8f39-b6090b284c27-kube-api-access-z42c8\") pod \"nova-operator-controller-manager-65849867d6-phpvf\" (UID: \"094e4268-74c4-40e5-8f39-b6090b284c27\") " pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.616926 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.619725 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.619843 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbmc4\" (UniqueName: \"kubernetes.io/projected/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-kube-api-access-rbmc4\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.620910 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7vmp\" (UniqueName: \"kubernetes.io/projected/d4f3075e-95f9-432a-bfcd-621b6cbe2615-kube-api-access-g7vmp\") pod \"octavia-operator-controller-manager-7fc9b76cf6-k9n27\" (UID: \"d4f3075e-95f9-432a-bfcd-621b6cbe2615\") " pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.633455 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.662896 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.672902 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.674222 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.714657 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-tsf9c" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.725254 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbmc4\" (UniqueName: \"kubernetes.io/projected/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-kube-api-access-rbmc4\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.725384 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.725440 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbnvl\" (UniqueName: \"kubernetes.io/projected/ebd95a40-2e8d-481a-a842-b8fe125ebdb2-kube-api-access-xbnvl\") pod \"ovn-operator-controller-manager-55db956ddc-zpq74\" (UID: \"ebd95a40-2e8d-481a-a842-b8fe125ebdb2\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" Jan 20 20:02:39 crc kubenswrapper[4948]: E0120 20:02:39.727247 4948 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:39 crc kubenswrapper[4948]: E0120 20:02:39.727315 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert podName:40c9112e-c5f0-4cf7-8039-f50ff4640ba9 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:40.227293628 +0000 UTC m=+788.178018597 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" (UID: "40c9112e-c5f0-4cf7-8039-f50ff4640ba9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.747376 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.752764 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.755132 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.772107 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.778818 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-kzxx9" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.779000 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.779855 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.786105 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.787143 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.806859 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.816099 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.822246 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-xdpbw" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.822470 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-89crg" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.826825 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrfcp\" (UniqueName: \"kubernetes.io/projected/febd743e-d499-4cc9-9e66-29ac1b4ca89c-kube-api-access-jrfcp\") pod \"placement-operator-controller-manager-686df47fcb-wnzkb\" (UID: \"febd743e-d499-4cc9-9e66-29ac1b4ca89c\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.826920 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbnvl\" (UniqueName: \"kubernetes.io/projected/ebd95a40-2e8d-481a-a842-b8fe125ebdb2-kube-api-access-xbnvl\") pod \"ovn-operator-controller-manager-55db956ddc-zpq74\" (UID: \"ebd95a40-2e8d-481a-a842-b8fe125ebdb2\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.862768 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn"] Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.863597 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.874888 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-k8chc" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.907426 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbnvl\" (UniqueName: \"kubernetes.io/projected/ebd95a40-2e8d-481a-a842-b8fe125ebdb2-kube-api-access-xbnvl\") pod \"ovn-operator-controller-manager-55db956ddc-zpq74\" (UID: \"ebd95a40-2e8d-481a-a842-b8fe125ebdb2\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.917237 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.926673 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbmc4\" (UniqueName: \"kubernetes.io/projected/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-kube-api-access-rbmc4\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.929496 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9d5x\" (UniqueName: \"kubernetes.io/projected/910fc292-11a6-47de-80e6-59cc027e972c-kube-api-access-c9d5x\") pod \"telemetry-operator-controller-manager-5f8f495fcf-rsb9m\" (UID: \"910fc292-11a6-47de-80e6-59cc027e972c\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.929565 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrfcp\" (UniqueName: \"kubernetes.io/projected/febd743e-d499-4cc9-9e66-29ac1b4ca89c-kube-api-access-jrfcp\") pod \"placement-operator-controller-manager-686df47fcb-wnzkb\" (UID: \"febd743e-d499-4cc9-9e66-29ac1b4ca89c\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.929588 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sdptk\" (UniqueName: \"kubernetes.io/projected/80950323-03e4-4aa3-ba31-06043e2a51b9-kube-api-access-sdptk\") pod \"swift-operator-controller-manager-56544cf655-ngkkb\" (UID: \"80950323-03e4-4aa3-ba31-06043e2a51b9\") " pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.929674 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q59pz\" (UniqueName: \"kubernetes.io/projected/5a25aeaf-8323-46a9-8c2a-e000321478ee-kube-api-access-q59pz\") pod \"test-operator-controller-manager-7cd8bc9dbb-2bt9t\" (UID: \"5a25aeaf-8323-46a9-8c2a-e000321478ee\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.962559 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrfcp\" (UniqueName: 
\"kubernetes.io/projected/febd743e-d499-4cc9-9e66-29ac1b4ca89c-kube-api-access-jrfcp\") pod \"placement-operator-controller-manager-686df47fcb-wnzkb\" (UID: \"febd743e-d499-4cc9-9e66-29ac1b4ca89c\") " pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" Jan 20 20:02:39 crc kubenswrapper[4948]: I0120 20:02:39.973152 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn"] Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.028822 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk"] Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.031384 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q59pz\" (UniqueName: \"kubernetes.io/projected/5a25aeaf-8323-46a9-8c2a-e000321478ee-kube-api-access-q59pz\") pod \"test-operator-controller-manager-7cd8bc9dbb-2bt9t\" (UID: \"5a25aeaf-8323-46a9-8c2a-e000321478ee\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.031451 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2dbq\" (UniqueName: \"kubernetes.io/projected/76b9cf9a-a325-4528-8f35-3d0b94060ef1-kube-api-access-t2dbq\") pod \"watcher-operator-controller-manager-64cd966744-52fnn\" (UID: \"76b9cf9a-a325-4528-8f35-3d0b94060ef1\") " pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.031508 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9d5x\" (UniqueName: \"kubernetes.io/projected/910fc292-11a6-47de-80e6-59cc027e972c-kube-api-access-c9d5x\") pod \"telemetry-operator-controller-manager-5f8f495fcf-rsb9m\" (UID: \"910fc292-11a6-47de-80e6-59cc027e972c\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.031540 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sdptk\" (UniqueName: \"kubernetes.io/projected/80950323-03e4-4aa3-ba31-06043e2a51b9-kube-api-access-sdptk\") pod \"swift-operator-controller-manager-56544cf655-ngkkb\" (UID: \"80950323-03e4-4aa3-ba31-06043e2a51b9\") " pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.081007 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sdptk\" (UniqueName: \"kubernetes.io/projected/80950323-03e4-4aa3-ba31-06043e2a51b9-kube-api-access-sdptk\") pod \"swift-operator-controller-manager-56544cf655-ngkkb\" (UID: \"80950323-03e4-4aa3-ba31-06043e2a51b9\") " pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.112603 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9d5x\" (UniqueName: \"kubernetes.io/projected/910fc292-11a6-47de-80e6-59cc027e972c-kube-api-access-c9d5x\") pod \"telemetry-operator-controller-manager-5f8f495fcf-rsb9m\" (UID: \"910fc292-11a6-47de-80e6-59cc027e972c\") " pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.117136 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-q59pz\" (UniqueName: \"kubernetes.io/projected/5a25aeaf-8323-46a9-8c2a-e000321478ee-kube-api-access-q59pz\") pod \"test-operator-controller-manager-7cd8bc9dbb-2bt9t\" (UID: \"5a25aeaf-8323-46a9-8c2a-e000321478ee\") " pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.129750 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw"] Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.130919 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.132812 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2dbq\" (UniqueName: \"kubernetes.io/projected/76b9cf9a-a325-4528-8f35-3d0b94060ef1-kube-api-access-t2dbq\") pod \"watcher-operator-controller-manager-64cd966744-52fnn\" (UID: \"76b9cf9a-a325-4528-8f35-3d0b94060ef1\") " pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.143758 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" event={"ID":"ef41048d-32d0-4b45-98ef-181e13e62c26","Type":"ContainerStarted","Data":"81bbe258cac697e3f15c83da56b42578d8b1d1c916f9fc058b15ce3086f93461"} Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.153788 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.154291 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-pxtdz" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.154544 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.174522 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.202272 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2dbq\" (UniqueName: \"kubernetes.io/projected/76b9cf9a-a325-4528-8f35-3d0b94060ef1-kube-api-access-t2dbq\") pod \"watcher-operator-controller-manager-64cd966744-52fnn\" (UID: \"76b9cf9a-a325-4528-8f35-3d0b94060ef1\") " pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.216020 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q"] Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.223078 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.231265 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw"] Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.233984 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.234050 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.234220 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnl65\" (UniqueName: \"kubernetes.io/projected/0a88f765-46a8-4252-832c-ccf595a0f1d2-kube-api-access-dnl65\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.234266 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.234450 4948 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.234502 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert podName:40c9112e-c5f0-4cf7-8039-f50ff4640ba9 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:41.234479812 +0000 UTC m=+789.185204781 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" (UID: "40c9112e-c5f0-4cf7-8039-f50ff4640ba9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.275586 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.327634 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk"] Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.342340 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.343306 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.344872 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-svmbz" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.368764 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.368815 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.368944 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnl65\" (UniqueName: \"kubernetes.io/projected/0a88f765-46a8-4252-832c-ccf595a0f1d2-kube-api-access-dnl65\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.369684 4948 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.369854 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:40.869826013 +0000 UTC m=+788.820550982 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "webhook-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.370073 4948 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.370102 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:40.87009404 +0000 UTC m=+788.820819009 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "metrics-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.483213 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.483454 4948 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.483515 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert podName:09ceeac6-c058-41a8-a0d6-07b4bde73893 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:42.48349929 +0000 UTC m=+790.434224259 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert") pod "infra-operator-controller-manager-77c48c7859-xgc4z" (UID: "09ceeac6-c058-41a8-a0d6-07b4bde73893") : secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.493127 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.494595 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.498398 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk"] Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.537113 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnl65\" (UniqueName: \"kubernetes.io/projected/0a88f765-46a8-4252-832c-ccf595a0f1d2-kube-api-access-dnl65\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.585697 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7gr8\" (UniqueName: \"kubernetes.io/projected/f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0-kube-api-access-d7gr8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-9m5nk\" (UID: \"f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.721903 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7gr8\" (UniqueName: \"kubernetes.io/projected/f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0-kube-api-access-d7gr8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-9m5nk\" (UID: \"f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.764992 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7gr8\" (UniqueName: \"kubernetes.io/projected/f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0-kube-api-access-d7gr8\") pod \"rabbitmq-cluster-operator-manager-668c99d594-9m5nk\" (UID: \"f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.928478 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.928522 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.928681 4948 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.928846 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. 
No retries permitted until 2026-01-20 20:02:41.928826013 +0000 UTC m=+789.879550982 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "webhook-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.929032 4948 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: E0120 20:02:40.929089 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:41.92907201 +0000 UTC m=+789.879796979 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "metrics-server-cert" not found Jan 20 20:02:40 crc kubenswrapper[4948]: I0120 20:02:40.930270 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.053631 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b"] Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.081034 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd6a36d62_a638_45c5_956a_12cb6f1ced24.slice/crio-fe8850c109c1828892f33e02d06df6ac01e15da2c876e10cbd49e17dd2040c33 WatchSource:0}: Error finding container fe8850c109c1828892f33e02d06df6ac01e15da2c876e10cbd49e17dd2040c33: Status 404 returned error can't find the container with id fe8850c109c1828892f33e02d06df6ac01e15da2c876e10cbd49e17dd2040c33 Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.158764 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.163672 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.170602 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" event={"ID":"d6a36d62-a638-45c5-956a-12cb6f1ced24","Type":"ContainerStarted","Data":"fe8850c109c1828892f33e02d06df6ac01e15da2c876e10cbd49e17dd2040c33"} Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.172177 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" event={"ID":"d507465c-a0e3-494e-9e20-ef8c3517e059","Type":"ContainerStarted","Data":"4d1fbe24ae71050e2d182c073407c92ebcf27b6c9c0b15776e39fa5c0fbbebd8"} Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.242107 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert\") pod 
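[Annotation] The manager.go:1169 warnings above are cAdvisor racing the container runtime: a cgroup watch event arrives for a CRI-O container that cannot be looked up yet (or has already gone away), so the lookup returns a 404 and the event is dropped; the next relist reconciles state, which is why these warnings are generally benign. A hedged sketch of that drop-and-continue handling (assumed behavior, not cAdvisor's actual code; the container id below is hypothetical):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errNotFound = errors.New("Status 404 returned error can't find the container")

    // lookup stands in for querying the runtime for a container named in a
    // cgroup watch event.
    func lookup(id string) error { return errNotFound }

    func processWatchEvent(id string) {
    	if err := lookup(id); err != nil {
    		// Logged as a warning and skipped; a later relist repairs state.
    		fmt.Printf("Failed to process watch event for %s: %v\n", id, err)
    	}
    }

    func main() { processWatchEvent("example-container-id") }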
\"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.242341 4948 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.242397 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert podName:40c9112e-c5f0-4cf7-8039-f50ff4640ba9 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:43.242378797 +0000 UTC m=+791.193103766 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" (UID: "40c9112e-c5f0-4cf7-8039-f50ff4640ba9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.507900 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.531096 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.541961 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.553975 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.562671 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.579989 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.604515 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-65849867d6-phpvf"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.618663 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4"] Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.643021 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod094e4268_74c4_40e5_8f39_b6090b284c27.slice/crio-402465f778f9215526ca5c77f0d39b9da5ab074b60f55b6a8c4cee28979c13e7 WatchSource:0}: Error finding container 402465f778f9215526ca5c77f0d39b9da5ab074b60f55b6a8c4cee28979c13e7: Status 404 returned error can't find the container with id 402465f778f9215526ca5c77f0d39b9da5ab074b60f55b6a8c4cee28979c13e7 Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.649810 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod233a0ffe_a99e_4268_93ed_a2a20cb2c7ab.slice/crio-1e1d8b865ffe262a65a30ec109f62b09ad3ec3702b1ca3576a1f72ecc7d7eca6 
WatchSource:0}: Error finding container 1e1d8b865ffe262a65a30ec109f62b09ad3ec3702b1ca3576a1f72ecc7d7eca6: Status 404 returned error can't find the container with id 1e1d8b865ffe262a65a30ec109f62b09ad3ec3702b1ca3576a1f72ecc7d7eca6 Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.650175 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.683416 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.693007 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t"] Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.725990 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk"] Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.731206 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a25aeaf_8323_46a9_8c2a_e000321478ee.slice/crio-effb9484d9e33250941ef33cf94614571b52de8cdf057602ffc6cdcc5b1373ec WatchSource:0}: Error finding container effb9484d9e33250941ef33cf94614571b52de8cdf057602ffc6cdcc5b1373ec: Status 404 returned error can't find the container with id effb9484d9e33250941ef33cf94614571b52de8cdf057602ffc6cdcc5b1373ec Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.731740 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod80950323_03e4_4aa3_ba31_06043e2a51b9.slice/crio-65cb54aa68207d73515ab869b02625e28cec1573b8051f5b0aff34d79731245f WatchSource:0}: Error finding container 65cb54aa68207d73515ab869b02625e28cec1573b8051f5b0aff34d79731245f: Status 404 returned error can't find the container with id 65cb54aa68207d73515ab869b02625e28cec1573b8051f5b0aff34d79731245f Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.744484 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2fc1e50_d924_4e66_9ba5_b7fcb44b4ed0.slice/crio-9513526dca130db6a09723a626996a1e11ff7e6d1594515a9a306ff12be3ca21 WatchSource:0}: Error finding container 9513526dca130db6a09723a626996a1e11ff7e6d1594515a9a306ff12be3ca21: Status 404 returned error can't find the container with id 9513526dca130db6a09723a626996a1e11ff7e6d1594515a9a306ff12be3ca21 Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.755070 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q59pz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-7cd8bc9dbb-2bt9t_openstack-operators(5a25aeaf-8323-46a9-8c2a-e000321478ee): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.755281 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g7vmp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7fc9b76cf6-k9n27_openstack-operators(d4f3075e-95f9-432a-bfcd-621b6cbe2615): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.757156 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" podUID="d4f3075e-95f9-432a-bfcd-621b6cbe2615" Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.757205 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" podUID="5a25aeaf-8323-46a9-8c2a-e000321478ee" Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.874066 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn"] Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.875649 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76b9cf9a_a325_4528_8f35_3d0b94060ef1.slice/crio-a789be43913f1383afbfb07dc4fd754e815a62903d1e6b286eb1d65aff71dbe8 WatchSource:0}: Error finding container a789be43913f1383afbfb07dc4fd754e815a62903d1e6b286eb1d65aff71dbe8: Status 404 returned error can't find the container with id a789be43913f1383afbfb07dc4fd754e815a62903d1e6b286eb1d65aff71dbe8 Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.881274 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:d687150a46d97eb382dcd8305a2a611943af74771debe1fa9cc13a21e51c69ad,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t2dbq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-64cd966744-52fnn_openstack-operators(76b9cf9a-a325-4528-8f35-3d0b94060ef1): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.883305 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" podUID="76b9cf9a-a325-4528-8f35-3d0b94060ef1" Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.887690 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m"] Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.897451 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod910fc292_11a6_47de_80e6_59cc027e972c.slice/crio-36c8de0bee01693196c01bce3344da6504a3b79a45e2d1c44ea7e117d37670ec WatchSource:0}: Error finding container 36c8de0bee01693196c01bce3344da6504a3b79a45e2d1c44ea7e117d37670ec: Status 404 returned error can't find the container with id 36c8de0bee01693196c01bce3344da6504a3b79a45e2d1c44ea7e117d37670ec Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.903147 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb"] Jan 20 20:02:41 crc kubenswrapper[4948]: W0120 20:02:41.904812 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfebd743e_d499_4cc9_9e66_29ac1b4ca89c.slice/crio-b466b04584da73b023ea1644ec82e3e465e6edd5159140de71843d0a7621aa25 WatchSource:0}: Error finding container b466b04584da73b023ea1644ec82e3e465e6edd5159140de71843d0a7621aa25: Status 404 returned error can't find the container with id b466b04584da73b023ea1644ec82e3e465e6edd5159140de71843d0a7621aa25 Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.908418 4948 kuberuntime_manager.go:1274] 
"Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:146961cac3291daf96c1ca2bc7bd52bc94d1f4787a0770e23205c2c9beb0d737,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jrfcp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-686df47fcb-wnzkb_openstack-operators(febd743e-d499-4cc9-9e66-29ac1b4ca89c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.909576 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" podUID="febd743e-d499-4cc9-9e66-29ac1b4ca89c" Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.955211 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:41 crc kubenswrapper[4948]: I0120 20:02:41.955288 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" 
(UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.955415 4948 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.955421 4948 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.955497 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:43.955475359 +0000 UTC m=+791.906200328 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "webhook-server-cert" not found Jan 20 20:02:41 crc kubenswrapper[4948]: E0120 20:02:41.955548 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:43.955537331 +0000 UTC m=+791.906262290 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "metrics-server-cert" not found Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.179545 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" event={"ID":"febd743e-d499-4cc9-9e66-29ac1b4ca89c","Type":"ContainerStarted","Data":"b466b04584da73b023ea1644ec82e3e465e6edd5159140de71843d0a7621aa25"} Jan 20 20:02:42 crc kubenswrapper[4948]: E0120 20:02:42.181972 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:146961cac3291daf96c1ca2bc7bd52bc94d1f4787a0770e23205c2c9beb0d737\\\"\"" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" podUID="febd743e-d499-4cc9-9e66-29ac1b4ca89c" Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.183644 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" event={"ID":"910fc292-11a6-47de-80e6-59cc027e972c","Type":"ContainerStarted","Data":"36c8de0bee01693196c01bce3344da6504a3b79a45e2d1c44ea7e117d37670ec"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.198926 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" event={"ID":"61da457f-7595-4df3-8705-e34138ec590d","Type":"ContainerStarted","Data":"43c4e3eec40538286b343622eb2ab5183bdefde2d94bc72b7fc21b898e7e24d4"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.203966 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" event={"ID":"5a25aeaf-8323-46a9-8c2a-e000321478ee","Type":"ContainerStarted","Data":"effb9484d9e33250941ef33cf94614571b52de8cdf057602ffc6cdcc5b1373ec"} Jan 20 20:02:42 crc kubenswrapper[4948]: E0120 20:02:42.206918 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e\\\"\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" podUID="5a25aeaf-8323-46a9-8c2a-e000321478ee" Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.208470 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" event={"ID":"76b9cf9a-a325-4528-8f35-3d0b94060ef1","Type":"ContainerStarted","Data":"a789be43913f1383afbfb07dc4fd754e815a62903d1e6b286eb1d65aff71dbe8"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.209866 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" event={"ID":"80950323-03e4-4aa3-ba31-06043e2a51b9","Type":"ContainerStarted","Data":"65cb54aa68207d73515ab869b02625e28cec1573b8051f5b0aff34d79731245f"} Jan 20 20:02:42 crc kubenswrapper[4948]: E0120 20:02:42.210018 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:d687150a46d97eb382dcd8305a2a611943af74771debe1fa9cc13a21e51c69ad\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" podUID="76b9cf9a-a325-4528-8f35-3d0b94060ef1" Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.213690 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" event={"ID":"6f758308-6a33-4dc5-996e-beae970d4083","Type":"ContainerStarted","Data":"9ac8465683338a71a5ddda1d671a6100e5182ab24b7fca09deb1ad5283f176d5"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.218403 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" event={"ID":"233a0ffe-a99e-4268-93ed-a2a20cb2c7ab","Type":"ContainerStarted","Data":"1e1d8b865ffe262a65a30ec109f62b09ad3ec3702b1ca3576a1f72ecc7d7eca6"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.225753 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" event={"ID":"f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0","Type":"ContainerStarted","Data":"9513526dca130db6a09723a626996a1e11ff7e6d1594515a9a306ff12be3ca21"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.227144 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" event={"ID":"b78116d1-a584-49fa-ab14-86f78ce62420","Type":"ContainerStarted","Data":"aeea508f4982ae6f9740c5be7bcccff8db676681be6d15af914a6eca8292d96e"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.238967 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp" 
event={"ID":"ed91900c-0efb-4184-8d92-d11fb7ae82b7","Type":"ContainerStarted","Data":"f06d921608baa13e8e801bbd2dd330e75cfb1c88997c3514080c62157246dd69"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.241372 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" event={"ID":"ebd95a40-2e8d-481a-a842-b8fe125ebdb2","Type":"ContainerStarted","Data":"f6d38643aecff4c00b87f1905a412d456d7ea4b06eda562550d76a91e31f285e"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.243839 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq" event={"ID":"61ba0da3-99a5-4b43-a2fb-190260ab8f3a","Type":"ContainerStarted","Data":"47264f7c76c07a6c2bae5c993e1a7aca2184eaf676bd644a25c4cb2d88f93734"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.248070 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" event={"ID":"094e4268-74c4-40e5-8f39-b6090b284c27","Type":"ContainerStarted","Data":"402465f778f9215526ca5c77f0d39b9da5ab074b60f55b6a8c4cee28979c13e7"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.255290 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj" event={"ID":"38d63cbf-6bc2-4c48-9905-88c65334d42a","Type":"ContainerStarted","Data":"8756b208458245b36f828ea5c2e376f49a622a2bc01dd781c4599bf2c8348db3"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.258263 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" event={"ID":"d8461566-61e6-495d-b1ad-c0178c2eb849","Type":"ContainerStarted","Data":"22c0ceb3808d69b599a64c21c3ab343ec0e11a1c3421328f07b8e8759475458b"} Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.260330 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" event={"ID":"d4f3075e-95f9-432a-bfcd-621b6cbe2615","Type":"ContainerStarted","Data":"14c255c876f6a6f9e1f09309aa3c16715aeab5924f6ca9b71d6e6e322fb64386"} Jan 20 20:02:42 crc kubenswrapper[4948]: E0120 20:02:42.264616 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" podUID="d4f3075e-95f9-432a-bfcd-621b6cbe2615" Jan 20 20:02:42 crc kubenswrapper[4948]: I0120 20:02:42.578165 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:42 crc kubenswrapper[4948]: E0120 20:02:42.578328 4948 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:42 crc kubenswrapper[4948]: E0120 20:02:42.578379 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert 
podName:09ceeac6-c058-41a8-a0d6-07b4bde73893 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:46.578364417 +0000 UTC m=+794.529089376 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert") pod "infra-operator-controller-manager-77c48c7859-xgc4z" (UID: "09ceeac6-c058-41a8-a0d6-07b4bde73893") : secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:43 crc kubenswrapper[4948]: E0120 20:02:43.277595 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:244a4906353b84899db16a89e1ebb64491c9f85e69327cb2a72b6da0142a6e5e\\\"\"" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" podUID="5a25aeaf-8323-46a9-8c2a-e000321478ee" Jan 20 20:02:43 crc kubenswrapper[4948]: E0120 20:02:43.277633 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ab629ec4ce57b5cde9cd6d75069e68edca441b97b7b5a3f58804e2e61766b729\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" podUID="d4f3075e-95f9-432a-bfcd-621b6cbe2615" Jan 20 20:02:43 crc kubenswrapper[4948]: E0120 20:02:43.277631 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:146961cac3291daf96c1ca2bc7bd52bc94d1f4787a0770e23205c2c9beb0d737\\\"\"" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" podUID="febd743e-d499-4cc9-9e66-29ac1b4ca89c" Jan 20 20:02:43 crc kubenswrapper[4948]: E0120 20:02:43.278342 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:d687150a46d97eb382dcd8305a2a611943af74771debe1fa9cc13a21e51c69ad\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" podUID="76b9cf9a-a325-4528-8f35-3d0b94060ef1" Jan 20 20:02:43 crc kubenswrapper[4948]: I0120 20:02:43.295374 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:43 crc kubenswrapper[4948]: E0120 20:02:43.296426 4948 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:43 crc kubenswrapper[4948]: E0120 20:02:43.298199 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert podName:40c9112e-c5f0-4cf7-8039-f50ff4640ba9 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:47.298178719 +0000 UTC m=+795.248903688 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" (UID: "40c9112e-c5f0-4cf7-8039-f50ff4640ba9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:44 crc kubenswrapper[4948]: I0120 20:02:44.011200 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:44 crc kubenswrapper[4948]: I0120 20:02:44.011260 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:44 crc kubenswrapper[4948]: E0120 20:02:44.011397 4948 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 20:02:44 crc kubenswrapper[4948]: E0120 20:02:44.011470 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:48.011451676 +0000 UTC m=+795.962176645 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "webhook-server-cert" not found Jan 20 20:02:44 crc kubenswrapper[4948]: E0120 20:02:44.011951 4948 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 20:02:44 crc kubenswrapper[4948]: E0120 20:02:44.011982 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:48.011973911 +0000 UTC m=+795.962698880 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "metrics-server-cert" not found Jan 20 20:02:46 crc kubenswrapper[4948]: I0120 20:02:46.653668 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:46 crc kubenswrapper[4948]: E0120 20:02:46.653917 4948 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:46 crc kubenswrapper[4948]: E0120 20:02:46.654299 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert podName:09ceeac6-c058-41a8-a0d6-07b4bde73893 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:54.654278821 +0000 UTC m=+802.605003790 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert") pod "infra-operator-controller-manager-77c48c7859-xgc4z" (UID: "09ceeac6-c058-41a8-a0d6-07b4bde73893") : secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:47 crc kubenswrapper[4948]: I0120 20:02:47.366099 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:47 crc kubenswrapper[4948]: E0120 20:02:47.366308 4948 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:47 crc kubenswrapper[4948]: E0120 20:02:47.366449 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert podName:40c9112e-c5f0-4cf7-8039-f50ff4640ba9 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:55.366432967 +0000 UTC m=+803.317157936 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert") pod "openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" (UID: "40c9112e-c5f0-4cf7-8039-f50ff4640ba9") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 20 20:02:48 crc kubenswrapper[4948]: I0120 20:02:48.079661 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:48 crc kubenswrapper[4948]: I0120 20:02:48.079766 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:48 crc kubenswrapper[4948]: E0120 20:02:48.079839 4948 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 20 20:02:48 crc kubenswrapper[4948]: E0120 20:02:48.079903 4948 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 20 20:02:48 crc kubenswrapper[4948]: E0120 20:02:48.079920 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:56.079900589 +0000 UTC m=+804.030625558 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "webhook-server-cert" not found Jan 20 20:02:48 crc kubenswrapper[4948]: E0120 20:02:48.079942 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs podName:0a88f765-46a8-4252-832c-ccf595a0f1d2 nodeName:}" failed. No retries permitted until 2026-01-20 20:02:56.0799295 +0000 UTC m=+804.030654489 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs") pod "openstack-operator-controller-manager-7c9b95f56c-kd6qw" (UID: "0a88f765-46a8-4252-832c-ccf595a0f1d2") : secret "metrics-server-cert" not found Jan 20 20:02:54 crc kubenswrapper[4948]: I0120 20:02:54.740682 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:02:54 crc kubenswrapper[4948]: E0120 20:02:54.740889 4948 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:54 crc kubenswrapper[4948]: E0120 20:02:54.741298 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert podName:09ceeac6-c058-41a8-a0d6-07b4bde73893 nodeName:}" failed. No retries permitted until 2026-01-20 20:03:10.741279886 +0000 UTC m=+818.692004855 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert") pod "infra-operator-controller-manager-77c48c7859-xgc4z" (UID: "09ceeac6-c058-41a8-a0d6-07b4bde73893") : secret "infra-operator-webhook-server-cert" not found Jan 20 20:02:55 crc kubenswrapper[4948]: I0120 20:02:55.375470 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:55 crc kubenswrapper[4948]: I0120 20:02:55.387159 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/40c9112e-c5f0-4cf7-8039-f50ff4640ba9-cert\") pod \"openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl\" (UID: \"40c9112e-c5f0-4cf7-8039-f50ff4640ba9\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:55 crc kubenswrapper[4948]: I0120 20:02:55.443744 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:02:56 crc kubenswrapper[4948]: I0120 20:02:56.086049 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:56 crc kubenswrapper[4948]: I0120 20:02:56.086131 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:56 crc kubenswrapper[4948]: I0120 20:02:56.095014 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-metrics-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:56 crc kubenswrapper[4948]: I0120 20:02:56.095790 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0a88f765-46a8-4252-832c-ccf595a0f1d2-webhook-certs\") pod \"openstack-operator-controller-manager-7c9b95f56c-kd6qw\" (UID: \"0a88f765-46a8-4252-832c-ccf595a0f1d2\") " pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:56 crc kubenswrapper[4948]: I0120 20:02:56.370398 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:02:57 crc kubenswrapper[4948]: E0120 20:02:57.281801 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492" Jan 20 20:02:57 crc kubenswrapper[4948]: E0120 20:02:57.282511 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9pvxt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-594c8c9d5d-m8f25_openstack-operators(d8461566-61e6-495d-b1ad-c0178c2eb849): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:02:57 crc kubenswrapper[4948]: E0120 20:02:57.283727 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" podUID="d8461566-61e6-495d-b1ad-c0178c2eb849" Jan 20 20:02:57 crc kubenswrapper[4948]: E0120 20:02:57.398101 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492\\\"\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" podUID="d8461566-61e6-495d-b1ad-c0178c2eb849" Jan 20 20:02:57 crc kubenswrapper[4948]: E0120 20:02:57.981178 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822" Jan 20 20:02:57 crc kubenswrapper[4948]: E0120 20:02:57.981978 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x9vpr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-77d5c5b54f-b7j48_openstack-operators(6f758308-6a33-4dc5-996e-beae970d4083): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:02:57 crc kubenswrapper[4948]: E0120 20:02:57.984362 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" podUID="6f758308-6a33-4dc5-996e-beae970d4083" Jan 20 20:02:58 crc kubenswrapper[4948]: E0120 20:02:58.417720 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:3311e627bcb860d9443592a2c67078417318c9eb77d8ef4d07f9aa7027d46822\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" podUID="6f758308-6a33-4dc5-996e-beae970d4083" Jan 20 20:02:58 crc kubenswrapper[4948]: E0120 20:02:58.638797 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:0f440bf7dc937ce0135bdd328716686fd2f1320f453a9ac4e11e96383148ad6c" Jan 20 20:02:58 crc kubenswrapper[4948]: E0120 20:02:58.639021 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0f440bf7dc937ce0135bdd328716686fd2f1320f453a9ac4e11e96383148ad6c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mrlr6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-cb4666565-5mlm4_openstack-operators(61da457f-7595-4df3-8705-e34138ec590d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:02:58 crc kubenswrapper[4948]: 
E0120 20:02:58.640210 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" podUID="61da457f-7595-4df3-8705-e34138ec590d" Jan 20 20:02:59 crc kubenswrapper[4948]: E0120 20:02:59.421729 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:0f440bf7dc937ce0135bdd328716686fd2f1320f453a9ac4e11e96383148ad6c\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" podUID="61da457f-7595-4df3-8705-e34138ec590d" Jan 20 20:03:00 crc kubenswrapper[4948]: E0120 20:03:00.234935 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:56c5f8b78445b3dbfc0d5afd9312906f6bef4dccf67302b0e4e5ca20bd263525" Jan 20 20:03:00 crc kubenswrapper[4948]: E0120 20:03:00.268973 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:56c5f8b78445b3dbfc0d5afd9312906f6bef4dccf67302b0e4e5ca20bd263525,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ptcbj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
ironic-operator-controller-manager-78757b4889-6xdw4_openstack-operators(233a0ffe-a99e-4268-93ed-a2a20cb2c7ab): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:03:00 crc kubenswrapper[4948]: E0120 20:03:00.270174 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" podUID="233a0ffe-a99e-4268-93ed-a2a20cb2c7ab" Jan 20 20:03:00 crc kubenswrapper[4948]: E0120 20:03:00.432760 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:56c5f8b78445b3dbfc0d5afd9312906f6bef4dccf67302b0e4e5ca20bd263525\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" podUID="233a0ffe-a99e-4268-93ed-a2a20cb2c7ab" Jan 20 20:03:00 crc kubenswrapper[4948]: E0120 20:03:00.912901 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf" Jan 20 20:03:00 crc kubenswrapper[4948]: E0120 20:03:00.913162 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xbnvl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-55db956ddc-zpq74_openstack-operators(ebd95a40-2e8d-481a-a842-b8fe125ebdb2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:03:00 crc kubenswrapper[4948]: E0120 20:03:00.914395 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" podUID="ebd95a40-2e8d-481a-a842-b8fe125ebdb2" Jan 20 20:03:01 crc kubenswrapper[4948]: E0120 20:03:01.461384 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" podUID="ebd95a40-2e8d-481a-a842-b8fe125ebdb2" Jan 20 20:03:01 crc kubenswrapper[4948]: E0120 20:03:01.525969 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843" Jan 20 20:03:01 crc kubenswrapper[4948]: E0120 20:03:01.526293 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-c9d5x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
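
Every dumped &Container{} carries the same controller-runtime style probe wiring: a liveness probe on GET /healthz:8081 (InitialDelaySeconds:15, PeriodSeconds:20, FailureThreshold:3) and a readiness probe on GET /readyz:8081 (InitialDelaySeconds:5, PeriodSeconds:10, FailureThreshold:3). A sketch of what those fields mean operationally follows; it is not kubelet source, and the url and checks parameters are illustrative:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe mirrors the HTTPGet probe fields from the container specs above:
// wait initialDelay, then poll every period; failureThreshold consecutive
// bad results mark the probe failed, one good result resets the count.
func probe(url string, initialDelay, period time.Duration, failureThreshold, checks int) {
	time.Sleep(initialDelay)
	failures := 0
	for i := 0; i < checks; i++ {
		resp, err := http.Get(url)
		// Kubelet treats HTTP status >= 200 and < 400 as probe success.
		ok := err == nil && resp.StatusCode >= 200 && resp.StatusCode < 400
		if err == nil {
			resp.Body.Close()
		}
		if ok {
			failures = 0
			fmt.Println("probe ok:", url)
		} else if failures++; failures >= failureThreshold {
			fmt.Println("probe failed:", url)
			return
		}
		time.Sleep(period)
	}
}

func main() {
	// Readiness values copied from the specs: /readyz on 8081, delay 5s,
	// period 10s, failure threshold 3.
	probe("http://127.0.0.1:8081/readyz", 5*time.Second, 10*time.Second, 3, 5)
}
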
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-5f8f495fcf-rsb9m_openstack-operators(910fc292-11a6-47de-80e6-59cc027e972c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:03:01 crc kubenswrapper[4948]: E0120 20:03:01.527567 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" podUID="910fc292-11a6-47de-80e6-59cc027e972c" Jan 20 20:03:01 crc kubenswrapper[4948]: E0120 20:03:01.598530 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.89:5001/openstack-k8s-operators/swift-operator:21098e4af9a97a42aa9c03e3edec716c694bbf09" Jan 20 20:03:01 crc kubenswrapper[4948]: E0120 20:03:01.598595 4948 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.89:5001/openstack-k8s-operators/swift-operator:21098e4af9a97a42aa9c03e3edec716c694bbf09" Jan 20 20:03:01 crc kubenswrapper[4948]: E0120 20:03:01.598785 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.89:5001/openstack-k8s-operators/swift-operator:21098e4af9a97a42aa9c03e3edec716c694bbf09,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sdptk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-56544cf655-ngkkb_openstack-operators(80950323-03e4-4aa3-ba31-06043e2a51b9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:03:01 crc kubenswrapper[4948]: E0120 20:03:01.600019 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" podUID="80950323-03e4-4aa3-ba31-06043e2a51b9" Jan 20 20:03:02 crc kubenswrapper[4948]: E0120 20:03:02.464217 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.89:5001/openstack-k8s-operators/swift-operator:21098e4af9a97a42aa9c03e3edec716c694bbf09\\\"\"" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" podUID="80950323-03e4-4aa3-ba31-06043e2a51b9" Jan 20 20:03:02 crc kubenswrapper[4948]: E0120 20:03:02.464321 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:2e89109f5db66abf1afd15ef59bda35a53db40c5e59e020579ac5aa0acea1843\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" podUID="910fc292-11a6-47de-80e6-59cc027e972c" Jan 20 20:03:02 crc kubenswrapper[4948]: E0120 20:03:02.568289 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231" Jan 20 20:03:02 crc kubenswrapper[4948]: E0120 20:03:02.568520 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
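
The Resources field in each dump prints resource.Quantity values in their internal form: cpu: {{500 -3} {} 500m DecimalSI} is the unscaled integer 500 at decimal scale -3, i.e. 500m or half a CPU, and memory {{536870912 0} {} BinarySI} is 536870912 bytes = 512Mi. The real apimachinery API round-trips these values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("500m")  // printed by the kubelet as {{500 -3} {} 500m DecimalSI}
	mem := resource.MustParse("512Mi") // printed as {{536870912 0} {} BinarySI}

	fmt.Println(cpu.MilliValue()) // 500 millicores, i.e. half a CPU
	fmt.Println(mem.Value())      // 536870912 bytes, matching the dump
}

Note the swift-operator spec just above differs from the quay.io-hosted operators: it is pulled from the 38.102.83.89:5001 registry by tag rather than digest, drops ALL capabilities, and pins RunAsUser to 1000660000.
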
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z42c8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-65849867d6-phpvf_openstack-operators(094e4268-74c4-40e5-8f39-b6090b284c27): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:03:02 crc kubenswrapper[4948]: E0120 20:03:02.569596 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" podUID="094e4268-74c4-40e5-8f39-b6090b284c27" Jan 20 20:03:03 crc kubenswrapper[4948]: E0120 20:03:03.479249 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:6defa56fc6a5bfbd5b27d28ff7b1c7bc89b24b2ef956e2a6d97b2726f668a231\\\"\"" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" podUID="094e4268-74c4-40e5-8f39-b6090b284c27" Jan 20 20:03:07 crc kubenswrapper[4948]: E0120 20:03:07.911992 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 20 20:03:07 crc kubenswrapper[4948]: E0120 20:03:07.912543 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d7gr8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-9m5nk_openstack-operators(f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:03:07 crc kubenswrapper[4948]: E0120 20:03:07.915304 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" podUID="f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0" Jan 20 20:03:08 crc kubenswrapper[4948]: E0120 20:03:08.424148 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e" Jan 20 20:03:08 crc kubenswrapper[4948]: E0120 20:03:08.424609 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-29d52,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-767fdc4f47-hkwvp_openstack-operators(ed91900c-0efb-4184-8d92-d11fb7ae82b7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:03:08 crc kubenswrapper[4948]: E0120 20:03:08.425942 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp" podUID="ed91900c-0efb-4184-8d92-d11fb7ae82b7" Jan 20 20:03:08 crc kubenswrapper[4948]: E0120 20:03:08.518994 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:393d7567eef4fd05af625389f5a7384c6bb75108b21b06183f1f5e33aac5417e\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp" podUID="ed91900c-0efb-4184-8d92-d11fb7ae82b7" Jan 20 20:03:08 crc kubenswrapper[4948]: E0120 20:03:08.519037 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" 
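
The pod_workers.go "Error syncing pod, skipping" entries are not terminal failures: each pod has a dedicated worker goroutine, and a failed sync is simply dropped and re-attempted on the next trigger (backoff expiry, a PLEG event, or the periodic resync). A toy sketch of that retry shape, with syncPod standing in for the real per-pod sync:

package main

import (
	"errors"
	"fmt"
	"time"
)

// syncPod stands in for the kubelet's per-pod sync; here it keeps
// failing the way the entries above do until the image pull succeeds.
func syncPod(attempt int) error {
	if attempt < 3 {
		return errors.New(`failed to "StartContainer" for "manager" with ErrImagePull`)
	}
	return nil
}

func main() {
	for attempt := 1; ; attempt++ {
		if err := syncPod(attempt); err != nil {
			fmt.Println("Error syncing pod, skipping:", err)
			time.Sleep(50 * time.Millisecond) // stand-in for backoff / next sync trigger
			continue
		}
		fmt.Println("pod synced on attempt", attempt)
		return
	}
}

This is why every pod that logged ErrImagePull above eventually logs ContainerStarted below: the worker keeps retrying until a pull succeeds.
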
podUID="f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.028746 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl"] Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.092453 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw"] Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.523842 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" event={"ID":"d507465c-a0e3-494e-9e20-ef8c3517e059","Type":"ContainerStarted","Data":"3b538f05f76509472ae2ec8cefdcc41c1b7f602b391ca86ee1db6f6e818b1f9b"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.524883 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.526180 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" event={"ID":"d4f3075e-95f9-432a-bfcd-621b6cbe2615","Type":"ContainerStarted","Data":"c846f48e7c37c9571f2c316c32c350a705870167e24c5a2011a1b931bbb38e3a"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.526375 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.528007 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" event={"ID":"d6a36d62-a638-45c5-956a-12cb6f1ced24","Type":"ContainerStarted","Data":"0c54ac0af2358d6850f2fdbcad0f74a807089f718ec36e04c700e6d2b886efa1"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.528389 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.529308 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" event={"ID":"0a88f765-46a8-4252-832c-ccf595a0f1d2","Type":"ContainerStarted","Data":"5f533e05592e9ff995bfa711a2bc79f0e0e410276e559905301e1b6c33bfb591"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.530509 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" event={"ID":"5a25aeaf-8323-46a9-8c2a-e000321478ee","Type":"ContainerStarted","Data":"983e05e07b49fab13b3e77462bf09340877b075383e2a7480311715ff2def2bb"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.531019 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.533671 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" event={"ID":"ef41048d-32d0-4b45-98ef-181e13e62c26","Type":"ContainerStarted","Data":"b6a9f7549e947f09d5d6e3156e86c52df6740bdaa8e09042e0eda9b02435e2f7"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.533796 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.535177 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq" event={"ID":"61ba0da3-99a5-4b43-a2fb-190260ab8f3a","Type":"ContainerStarted","Data":"6c3ce3b1a109453381f47ed6da071254462a064154bd04d5c5bfbd0b6a344991"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.535282 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.537080 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj" event={"ID":"38d63cbf-6bc2-4c48-9905-88c65334d42a","Type":"ContainerStarted","Data":"de7a45c77fc173057e9a68d20224914c947cb3f83580cf1abdb0b34114f801ce"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.537519 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.538803 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" event={"ID":"76b9cf9a-a325-4528-8f35-3d0b94060ef1","Type":"ContainerStarted","Data":"45015aada87cb64ce9baa18ff0a2db1573bd8d85f3feb6bd59a098e30eb7cbd4"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.539282 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.540518 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" event={"ID":"b78116d1-a584-49fa-ab14-86f78ce62420","Type":"ContainerStarted","Data":"7cecff76df4f19d489d7ccc9b2641b7582bf3f657c6b4ee678294fe06392f8ed"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.541012 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.542220 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" event={"ID":"febd743e-d499-4cc9-9e66-29ac1b4ca89c","Type":"ContainerStarted","Data":"af5db4010724e90ce43dd590d2318c6a3f13e6ff1822c9fc74f2478dd94a6e36"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.542645 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.543503 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" event={"ID":"40c9112e-c5f0-4cf7-8039-f50ff4640ba9","Type":"ContainerStarted","Data":"f50cb95bf339d07e0c820cdd0dcb51f42375a1308ef516ad1428ba9d2e7f7991"} Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.744567 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" podStartSLOduration=9.058684588 podStartE2EDuration="31.744538089s" podCreationTimestamp="2026-01-20 
20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:40.357416172 +0000 UTC m=+788.308141141" lastFinishedPulling="2026-01-20 20:03:03.043269673 +0000 UTC m=+810.993994642" observedRunningTime="2026-01-20 20:03:09.736917713 +0000 UTC m=+817.687642682" watchObservedRunningTime="2026-01-20 20:03:09.744538089 +0000 UTC m=+817.695263058" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.842651 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" podStartSLOduration=11.445959241 podStartE2EDuration="31.842635325s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.188554034 +0000 UTC m=+789.139279003" lastFinishedPulling="2026-01-20 20:03:01.585230128 +0000 UTC m=+809.535955087" observedRunningTime="2026-01-20 20:03:09.807527151 +0000 UTC m=+817.758252120" watchObservedRunningTime="2026-01-20 20:03:09.842635325 +0000 UTC m=+817.793360284" Jan 20 20:03:09 crc kubenswrapper[4948]: I0120 20:03:09.890341 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" podStartSLOduration=4.343205394 podStartE2EDuration="30.890321745s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.908312194 +0000 UTC m=+789.859037163" lastFinishedPulling="2026-01-20 20:03:08.455428535 +0000 UTC m=+816.406153514" observedRunningTime="2026-01-20 20:03:09.88981581 +0000 UTC m=+817.840540799" watchObservedRunningTime="2026-01-20 20:03:09.890321745 +0000 UTC m=+817.841046714" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.048277 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq" podStartSLOduration=10.176181144 podStartE2EDuration="32.048257724s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.161830368 +0000 UTC m=+789.112555337" lastFinishedPulling="2026-01-20 20:03:03.033906948 +0000 UTC m=+810.984631917" observedRunningTime="2026-01-20 20:03:09.980396094 +0000 UTC m=+817.931121053" watchObservedRunningTime="2026-01-20 20:03:10.048257724 +0000 UTC m=+817.998982693" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.117119 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" podStartSLOduration=4.402169063 podStartE2EDuration="31.117102143s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.754930493 +0000 UTC m=+789.705655462" lastFinishedPulling="2026-01-20 20:03:08.469863573 +0000 UTC m=+816.420588542" observedRunningTime="2026-01-20 20:03:10.049611643 +0000 UTC m=+818.000336602" watchObservedRunningTime="2026-01-20 20:03:10.117102143 +0000 UTC m=+818.067827112" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.191554 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" podStartSLOduration=12.606794763 podStartE2EDuration="32.191537739s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:40.093468271 +0000 UTC m=+788.044193240" lastFinishedPulling="2026-01-20 20:02:59.678211247 +0000 UTC m=+807.628936216" observedRunningTime="2026-01-20 20:03:10.119102719 +0000 UTC m=+818.069827688" 
watchObservedRunningTime="2026-01-20 20:03:10.191537739 +0000 UTC m=+818.142262708" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.192155 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" podStartSLOduration=4.564011774 podStartE2EDuration="31.192149597s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.881121895 +0000 UTC m=+789.831846864" lastFinishedPulling="2026-01-20 20:03:08.509259718 +0000 UTC m=+816.459984687" observedRunningTime="2026-01-20 20:03:10.183974205 +0000 UTC m=+818.134699174" watchObservedRunningTime="2026-01-20 20:03:10.192149597 +0000 UTC m=+818.142874566" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.218131 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" podStartSLOduration=4.462837601 podStartE2EDuration="31.218113142s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.75514945 +0000 UTC m=+789.705874419" lastFinishedPulling="2026-01-20 20:03:08.510424991 +0000 UTC m=+816.461149960" observedRunningTime="2026-01-20 20:03:10.212908844 +0000 UTC m=+818.163633813" watchObservedRunningTime="2026-01-20 20:03:10.218113142 +0000 UTC m=+818.168838111" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.251520 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" podStartSLOduration=11.821214351 podStartE2EDuration="32.251501596s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.085865618 +0000 UTC m=+789.036590587" lastFinishedPulling="2026-01-20 20:03:01.516152863 +0000 UTC m=+809.466877832" observedRunningTime="2026-01-20 20:03:10.248767419 +0000 UTC m=+818.199492388" watchObservedRunningTime="2026-01-20 20:03:10.251501596 +0000 UTC m=+818.202226565" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.330238 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj" podStartSLOduration=12.346447747 podStartE2EDuration="32.330217384s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.532160379 +0000 UTC m=+789.482885348" lastFinishedPulling="2026-01-20 20:03:01.515930016 +0000 UTC m=+809.466654985" observedRunningTime="2026-01-20 20:03:10.328032592 +0000 UTC m=+818.278757561" watchObservedRunningTime="2026-01-20 20:03:10.330217384 +0000 UTC m=+818.280942353" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.821743 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.841263 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09ceeac6-c058-41a8-a0d6-07b4bde73893-cert\") pod \"infra-operator-controller-manager-77c48c7859-xgc4z\" (UID: \"09ceeac6-c058-41a8-a0d6-07b4bde73893\") " pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 
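
The pod_startup_latency_tracker lines encode two measurements: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). For the designate-operator entry above: 31.744538089s end to end, minus a 22.685853501s pull, leaves exactly the logged 9.058684588s. The arithmetic, reproduced from the logged timestamps:

package main

import (
	"fmt"
	"time"
)

// mustParse reads timestamps in the format the kubelet logs them
// (Go's default time.Time formatting).
func mustParse(s string) time.Time {
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	// Values copied from the designate-operator entry above.
	created := mustParse("2026-01-20 20:02:38 +0000 UTC")
	firstPull := mustParse("2026-01-20 20:02:40.357416172 +0000 UTC")
	lastPull := mustParse("2026-01-20 20:03:03.043269673 +0000 UTC")
	running := mustParse("2026-01-20 20:03:09.744538089 +0000 UTC")

	e2e := running.Sub(created)          // podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // pull window excluded

	fmt.Println(e2e) // 31.744538089s
	fmt.Println(slo) // 9.058684588s
}
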
20:03:10 crc kubenswrapper[4948]: I0120 20:03:10.919948 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:03:12 crc kubenswrapper[4948]: I0120 20:03:12.210633 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z"] Jan 20 20:03:12 crc kubenswrapper[4948]: I0120 20:03:12.580368 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" event={"ID":"09ceeac6-c058-41a8-a0d6-07b4bde73893","Type":"ContainerStarted","Data":"103047ab199108cddf0dd92d88a43cfe36ad1c3af7689a9516dea123bba2bd52"} Jan 20 20:03:14 crc kubenswrapper[4948]: I0120 20:03:14.571332 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:03:14 crc kubenswrapper[4948]: I0120 20:03:14.587516 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" event={"ID":"0a88f765-46a8-4252-832c-ccf595a0f1d2","Type":"ContainerStarted","Data":"06f7a8388b374ae8f8616b85f3e5ba3659f08d5b3875f12c44f7f99c8522cb90"} Jan 20 20:03:15 crc kubenswrapper[4948]: I0120 20:03:15.602760 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:03:15 crc kubenswrapper[4948]: I0120 20:03:15.654695 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" podStartSLOduration=35.654674274 podStartE2EDuration="35.654674274s" podCreationTimestamp="2026-01-20 20:02:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:03:15.65065581 +0000 UTC m=+823.601380779" watchObservedRunningTime="2026-01-20 20:03:15.654674274 +0000 UTC m=+823.605399243" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.622880 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" event={"ID":"61da457f-7595-4df3-8705-e34138ec590d","Type":"ContainerStarted","Data":"a60b5580f182b77b1ae44350191afbc9c537d04d8a67594e348ace64d9b86c84"} Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.623699 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.632014 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" event={"ID":"80950323-03e4-4aa3-ba31-06043e2a51b9","Type":"ContainerStarted","Data":"8389cf450c36ce83c8027300fd2dfe8a3c56bdecc2c6619faa1da720912e7315"} Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.632506 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.641178 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" event={"ID":"d8461566-61e6-495d-b1ad-c0178c2eb849","Type":"ContainerStarted","Data":"04c79eda669e60d14a773ae93e8c94e6dca0e1efbd8f8318d725be9bbc140764"} Jan 20 
20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.642058 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.646433 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" event={"ID":"094e4268-74c4-40e5-8f39-b6090b284c27","Type":"ContainerStarted","Data":"97311b2b716633139d1277abec01181c9bb1533afe259155a29453b16006e9eb"} Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.647179 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.649063 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" event={"ID":"6f758308-6a33-4dc5-996e-beae970d4083","Type":"ContainerStarted","Data":"3693f3703ee4afbc1c975cd8aa91b4594ad1e39ea40517c1c8599cabcc7647e4"} Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.649826 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.651054 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" event={"ID":"910fc292-11a6-47de-80e6-59cc027e972c","Type":"ContainerStarted","Data":"13cb78311306ce3df041f33b4ff21ba70e5282f18128b34fa86ec8b432b8e81f"} Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.651597 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.653241 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" event={"ID":"233a0ffe-a99e-4268-93ed-a2a20cb2c7ab","Type":"ContainerStarted","Data":"f35f7055108a0c753daf6dad5d0bebd6038c3af5be62f6c8539bbcf042e79a54"} Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.653654 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.654578 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" podStartSLOduration=4.466590974 podStartE2EDuration="38.654554782s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.574298121 +0000 UTC m=+789.525023090" lastFinishedPulling="2026-01-20 20:03:15.762261929 +0000 UTC m=+823.712986898" observedRunningTime="2026-01-20 20:03:16.647749129 +0000 UTC m=+824.598474098" watchObservedRunningTime="2026-01-20 20:03:16.654554782 +0000 UTC m=+824.605279751" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.693607 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" podStartSLOduration=4.781668042 podStartE2EDuration="38.693586017s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.578245453 +0000 UTC m=+789.528970422" lastFinishedPulling="2026-01-20 
20:03:15.490163408 +0000 UTC m=+823.440888397" observedRunningTime="2026-01-20 20:03:16.690652674 +0000 UTC m=+824.641377643" watchObservedRunningTime="2026-01-20 20:03:16.693586017 +0000 UTC m=+824.644310986" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.710225 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" podStartSLOduration=3.108603293 podStartE2EDuration="37.710208167s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.675430354 +0000 UTC m=+789.626155323" lastFinishedPulling="2026-01-20 20:03:16.277035228 +0000 UTC m=+824.227760197" observedRunningTime="2026-01-20 20:03:16.709440085 +0000 UTC m=+824.660165054" watchObservedRunningTime="2026-01-20 20:03:16.710208167 +0000 UTC m=+824.660933136" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.731062 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" podStartSLOduration=3.966760591 podStartE2EDuration="37.731046537s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.739566329 +0000 UTC m=+789.690291298" lastFinishedPulling="2026-01-20 20:03:15.503852275 +0000 UTC m=+823.454577244" observedRunningTime="2026-01-20 20:03:16.729293887 +0000 UTC m=+824.680018866" watchObservedRunningTime="2026-01-20 20:03:16.731046537 +0000 UTC m=+824.681771506" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.761208 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" podStartSLOduration=4.587707163 podStartE2EDuration="38.76118921s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.591456977 +0000 UTC m=+789.542181946" lastFinishedPulling="2026-01-20 20:03:15.764939024 +0000 UTC m=+823.715663993" observedRunningTime="2026-01-20 20:03:16.760016697 +0000 UTC m=+824.710741676" watchObservedRunningTime="2026-01-20 20:03:16.76118921 +0000 UTC m=+824.711914179" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.784097 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" podStartSLOduration=3.40774577 podStartE2EDuration="37.784075298s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.900168834 +0000 UTC m=+789.850893803" lastFinishedPulling="2026-01-20 20:03:16.276498352 +0000 UTC m=+824.227223331" observedRunningTime="2026-01-20 20:03:16.782210205 +0000 UTC m=+824.732935184" watchObservedRunningTime="2026-01-20 20:03:16.784075298 +0000 UTC m=+824.734800267" Jan 20 20:03:16 crc kubenswrapper[4948]: I0120 20:03:16.801866 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" podStartSLOduration=4.374154368 podStartE2EDuration="38.80184241s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.675781703 +0000 UTC m=+789.626506682" lastFinishedPulling="2026-01-20 20:03:16.103469755 +0000 UTC m=+824.054194724" observedRunningTime="2026-01-20 20:03:16.80006666 +0000 UTC m=+824.750791629" watchObservedRunningTime="2026-01-20 20:03:16.80184241 +0000 UTC m=+824.752567389" Jan 20 20:03:18 crc kubenswrapper[4948]: I0120 20:03:18.680349 4948 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7ddb5c749-6vfzk" Jan 20 20:03:18 crc kubenswrapper[4948]: I0120 20:03:18.782520 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-9f958b845-6mp4q" Jan 20 20:03:18 crc kubenswrapper[4948]: I0120 20:03:18.784340 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-9b68f5989-2k89b" Jan 20 20:03:18 crc kubenswrapper[4948]: I0120 20:03:18.948316 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-c6994669c-x9hmd" Jan 20 20:03:19 crc kubenswrapper[4948]: I0120 20:03:19.209981 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-864f6b75bf-snszj" Jan 20 20:03:19 crc kubenswrapper[4948]: I0120 20:03:19.423889 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-7qmgq" Jan 20 20:03:19 crc kubenswrapper[4948]: I0120 20:03:19.921250 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7fc9b76cf6-k9n27" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.233558 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-686df47fcb-wnzkb" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.280281 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-56544cf655-ngkkb" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.349155 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-7cd8bc9dbb-2bt9t" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.496457 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-64cd966744-52fnn" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.703299 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" event={"ID":"09ceeac6-c058-41a8-a0d6-07b4bde73893","Type":"ContainerStarted","Data":"78f70a8ef8c3b18b78277428bc02b1de6625d3b30f15452cb68179fc1f9a6c92"} Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.703441 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.705539 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp" event={"ID":"ed91900c-0efb-4184-8d92-d11fb7ae82b7","Type":"ContainerStarted","Data":"96f913501991dac2c991692e83d179090956c0784965c4f4b8ea70460f5794dc"} Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.705784 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.707617 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" event={"ID":"ebd95a40-2e8d-481a-a842-b8fe125ebdb2","Type":"ContainerStarted","Data":"5de580ae11a828655eb01c8c043700c1a83dee7ec5ea7d840c19fc4b95cb52a3"} Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.707837 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.709419 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" event={"ID":"40c9112e-c5f0-4cf7-8039-f50ff4640ba9","Type":"ContainerStarted","Data":"cfa900aa6cc8da354d8e94e906dca325af8e0faf637b012503f7599681ba1a3c"} Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.709594 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.734808 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" podStartSLOduration=35.058770746 podStartE2EDuration="42.734789648s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:03:12.22149849 +0000 UTC m=+820.172223469" lastFinishedPulling="2026-01-20 20:03:19.897517402 +0000 UTC m=+827.848242371" observedRunningTime="2026-01-20 20:03:20.73132125 +0000 UTC m=+828.682046219" watchObservedRunningTime="2026-01-20 20:03:20.734789648 +0000 UTC m=+828.685514617" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.782597 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" podStartSLOduration=3.4356622 podStartE2EDuration="41.782572541s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.56401274 +0000 UTC m=+789.514737709" lastFinishedPulling="2026-01-20 20:03:19.910923081 +0000 UTC m=+827.861648050" observedRunningTime="2026-01-20 20:03:20.782019295 +0000 UTC m=+828.732744264" watchObservedRunningTime="2026-01-20 20:03:20.782572541 +0000 UTC m=+828.733297510" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.785470 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp" podStartSLOduration=4.283166353 podStartE2EDuration="42.785452192s" podCreationTimestamp="2026-01-20 20:02:38 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.664046421 +0000 UTC m=+789.614771390" lastFinishedPulling="2026-01-20 20:03:20.16633226 +0000 UTC m=+828.117057229" observedRunningTime="2026-01-20 20:03:20.763979875 +0000 UTC m=+828.714704854" watchObservedRunningTime="2026-01-20 20:03:20.785452192 +0000 UTC m=+828.736177161" Jan 20 20:03:20 crc kubenswrapper[4948]: I0120 20:03:20.817173 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" podStartSLOduration=30.971764139 podStartE2EDuration="41.817152619s" podCreationTimestamp="2026-01-20 20:02:39 +0000 UTC" firstStartedPulling="2026-01-20 20:03:09.04816647 +0000 UTC m=+816.998891439" lastFinishedPulling="2026-01-20 20:03:19.89355495 +0000 UTC m=+827.844279919" observedRunningTime="2026-01-20 20:03:20.807144976 +0000 UTC 
m=+828.757869955" watchObservedRunningTime="2026-01-20 20:03:20.817152619 +0000 UTC m=+828.767877598" Jan 20 20:03:23 crc kubenswrapper[4948]: I0120 20:03:23.734531 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" event={"ID":"f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0","Type":"ContainerStarted","Data":"101a737aa505b436233c212d103e58ae8ab600a3f369573ff67e291ed610fce6"} Jan 20 20:03:25 crc kubenswrapper[4948]: I0120 20:03:25.449542 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl" Jan 20 20:03:25 crc kubenswrapper[4948]: I0120 20:03:25.491054 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9m5nk" podStartSLOduration=4.194760484 podStartE2EDuration="45.491007575s" podCreationTimestamp="2026-01-20 20:02:40 +0000 UTC" firstStartedPulling="2026-01-20 20:02:41.75020164 +0000 UTC m=+789.700926609" lastFinishedPulling="2026-01-20 20:03:23.046448731 +0000 UTC m=+830.997173700" observedRunningTime="2026-01-20 20:03:23.752990507 +0000 UTC m=+831.703715496" watchObservedRunningTime="2026-01-20 20:03:25.491007575 +0000 UTC m=+833.441732564" Jan 20 20:03:26 crc kubenswrapper[4948]: I0120 20:03:26.378288 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7c9b95f56c-kd6qw" Jan 20 20:03:29 crc kubenswrapper[4948]: I0120 20:03:29.105482 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-b7j48" Jan 20 20:03:29 crc kubenswrapper[4948]: I0120 20:03:29.163860 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-78757b4889-6xdw4" Jan 20 20:03:29 crc kubenswrapper[4948]: I0120 20:03:29.234457 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-767fdc4f47-hkwvp" Jan 20 20:03:29 crc kubenswrapper[4948]: I0120 20:03:29.266274 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-m8f25" Jan 20 20:03:29 crc kubenswrapper[4948]: I0120 20:03:29.525065 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-cb4666565-5mlm4" Jan 20 20:03:29 crc kubenswrapper[4948]: I0120 20:03:29.637375 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-65849867d6-phpvf" Jan 20 20:03:30 crc kubenswrapper[4948]: I0120 20:03:30.178450 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-zpq74" Jan 20 20:03:30 crc kubenswrapper[4948]: I0120 20:03:30.497390 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-5f8f495fcf-rsb9m" Jan 20 20:03:30 crc kubenswrapper[4948]: I0120 20:03:30.926721 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-77c48c7859-xgc4z" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.812058 4948 
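
The probe entries above transition from status="" (no probe result recorded yet) to status="ready" once each readiness probe starts passing; that flip is what sets the pod's PodReady condition and lets it serve endpoints. A small check of that condition using the real k8s.io/api types (the literal pod in main is a stand-in):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isReady reports whether the PodReady condition is True, which is what
// the status="ready" probe entries above correspond to.
func isReady(pod *corev1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == corev1.PodReady {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	pod := &corev1.Pod{Status: corev1.PodStatus{Conditions: []corev1.PodCondition{
		{Type: corev1.PodReady, Status: corev1.ConditionTrue},
	}}}
	fmt.Println(isReady(pod)) // true once the readiness probe passes
}
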
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-75wk2"] Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.813723 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.819614 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.819684 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-hdlxr" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.819745 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.820103 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.841928 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-75wk2"] Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.919672 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgc9t\" (UniqueName: \"kubernetes.io/projected/0c3623e2-3568-42d3-ac5a-6f803601f092-kube-api-access-zgc9t\") pod \"dnsmasq-dns-675f4bcbfc-75wk2\" (UID: \"0c3623e2-3568-42d3-ac5a-6f803601f092\") " pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.919900 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3623e2-3568-42d3-ac5a-6f803601f092-config\") pod \"dnsmasq-dns-675f4bcbfc-75wk2\" (UID: \"0c3623e2-3568-42d3-ac5a-6f803601f092\") " pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.953467 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jpn5n"] Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.954638 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:47 crc kubenswrapper[4948]: I0120 20:03:47.957931 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.014250 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jpn5n"] Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.023426 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgc9t\" (UniqueName: \"kubernetes.io/projected/0c3623e2-3568-42d3-ac5a-6f803601f092-kube-api-access-zgc9t\") pod \"dnsmasq-dns-675f4bcbfc-75wk2\" (UID: \"0c3623e2-3568-42d3-ac5a-6f803601f092\") " pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.023476 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3623e2-3568-42d3-ac5a-6f803601f092-config\") pod \"dnsmasq-dns-675f4bcbfc-75wk2\" (UID: \"0c3623e2-3568-42d3-ac5a-6f803601f092\") " pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.024434 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3623e2-3568-42d3-ac5a-6f803601f092-config\") pod \"dnsmasq-dns-675f4bcbfc-75wk2\" (UID: \"0c3623e2-3568-42d3-ac5a-6f803601f092\") " pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.060559 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgc9t\" (UniqueName: \"kubernetes.io/projected/0c3623e2-3568-42d3-ac5a-6f803601f092-kube-api-access-zgc9t\") pod \"dnsmasq-dns-675f4bcbfc-75wk2\" (UID: \"0c3623e2-3568-42d3-ac5a-6f803601f092\") " pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.124551 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bkk7t\" (UniqueName: \"kubernetes.io/projected/1cfa9442-f2db-4649-945d-7c1133779d93-kube-api-access-bkk7t\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.124602 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.124665 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-config\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.225437 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.225771 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-config\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.225880 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bkk7t\" (UniqueName: \"kubernetes.io/projected/1cfa9442-f2db-4649-945d-7c1133779d93-kube-api-access-bkk7t\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.225923 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.226880 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.226973 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-config\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.256741 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bkk7t\" (UniqueName: \"kubernetes.io/projected/1cfa9442-f2db-4649-945d-7c1133779d93-kube-api-access-bkk7t\") pod \"dnsmasq-dns-78dd6ddcc-jpn5n\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.282395 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.699237 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-75wk2"] Jan 20 20:03:48 crc kubenswrapper[4948]: W0120 20:03:48.709307 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c3623e2_3568_42d3_ac5a_6f803601f092.slice/crio-8c5515952ebd52352fc1508f8fbe08c8d98476077b71777dba3d408968f4385b WatchSource:0}: Error finding container 8c5515952ebd52352fc1508f8fbe08c8d98476077b71777dba3d408968f4385b: Status 404 returned error can't find the container with id 8c5515952ebd52352fc1508f8fbe08c8d98476077b71777dba3d408968f4385b Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.842674 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jpn5n"] Jan 20 20:03:48 crc kubenswrapper[4948]: W0120 20:03:48.853127 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1cfa9442_f2db_4649_945d_7c1133779d93.slice/crio-e1ed00c21ad7ac71803e85cc95a4e7cf11cec71fd6640aeff928ad2ef00e4ae8 WatchSource:0}: Error finding container e1ed00c21ad7ac71803e85cc95a4e7cf11cec71fd6640aeff928ad2ef00e4ae8: Status 404 returned error can't find the container with id e1ed00c21ad7ac71803e85cc95a4e7cf11cec71fd6640aeff928ad2ef00e4ae8 Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.934506 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" event={"ID":"1cfa9442-f2db-4649-945d-7c1133779d93","Type":"ContainerStarted","Data":"e1ed00c21ad7ac71803e85cc95a4e7cf11cec71fd6640aeff928ad2ef00e4ae8"} Jan 20 20:03:48 crc kubenswrapper[4948]: I0120 20:03:48.935996 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" event={"ID":"0c3623e2-3568-42d3-ac5a-6f803601f092","Type":"ContainerStarted","Data":"8c5515952ebd52352fc1508f8fbe08c8d98476077b71777dba3d408968f4385b"} Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.522225 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-75wk2"] Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.564186 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tnr9m"] Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.565617 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.598051 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tnr9m"] Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.666689 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zwtn\" (UniqueName: \"kubernetes.io/projected/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-kube-api-access-5zwtn\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.666843 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-config\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.666871 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-dns-svc\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.769214 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-config\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.769275 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-dns-svc\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.769318 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zwtn\" (UniqueName: \"kubernetes.io/projected/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-kube-api-access-5zwtn\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.770391 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-config\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.774420 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-dns-svc\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.829854 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zwtn\" (UniqueName: 
\"kubernetes.io/projected/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-kube-api-access-5zwtn\") pod \"dnsmasq-dns-666b6646f7-tnr9m\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") " pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.888164 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.916452 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jpn5n"] Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.928538 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6dvz5"] Jan 20 20:03:50 crc kubenswrapper[4948]: I0120 20:03:50.929959 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.028860 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6dvz5"] Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.106665 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zv86\" (UniqueName: \"kubernetes.io/projected/78d7b0e4-55a7-45b8-a119-b4117c298f65-kube-api-access-6zv86\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.106744 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.106790 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-config\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.208994 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zv86\" (UniqueName: \"kubernetes.io/projected/78d7b0e4-55a7-45b8-a119-b4117c298f65-kube-api-access-6zv86\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.209058 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.209094 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-config\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.211667 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.213044 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-config\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.244519 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zv86\" (UniqueName: \"kubernetes.io/projected/78d7b0e4-55a7-45b8-a119-b4117c298f65-kube-api-access-6zv86\") pod \"dnsmasq-dns-57d769cc4f-6dvz5\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.282388 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.687365 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tnr9m"] Jan 20 20:03:51 crc kubenswrapper[4948]: W0120 20:03:51.698614 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4253fee9_d31e_4dc7_a0fa_08d71e01c3e9.slice/crio-d35740da80d7ce66d8b40776c8575dffbb862077c4759cf07d1d5985d5cafc14 WatchSource:0}: Error finding container d35740da80d7ce66d8b40776c8575dffbb862077c4759cf07d1d5985d5cafc14: Status 404 returned error can't find the container with id d35740da80d7ce66d8b40776c8575dffbb862077c4759cf07d1d5985d5cafc14 Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.759409 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.762963 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.767842 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.768099 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.768327 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.768383 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.768506 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.768847 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-2f6qg" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.771083 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.774513 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.839060 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6dvz5"] Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.923987 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.924078 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98083b85-e2b1-48e2-82f9-c71019aa2475-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.924103 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98083b85-e2b1-48e2-82f9-c71019aa2475-pod-info\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.924159 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-config-data\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.924189 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.924550 4948 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-server-conf\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.924670 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6jc8\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-kube-api-access-p6jc8\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.924695 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.924754 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.925105 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:51 crc kubenswrapper[4948]: I0120 20:03:51.925316 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.027343 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.027665 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98083b85-e2b1-48e2-82f9-c71019aa2475-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.027681 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98083b85-e2b1-48e2-82f9-c71019aa2475-pod-info\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.027823 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-config-data\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.027850 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.027885 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-server-conf\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.027962 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6jc8\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-kube-api-access-p6jc8\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.027989 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.028011 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.028040 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.028070 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.028581 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.031272 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-server-conf\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.031694 4948 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.032177 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.032443 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.038092 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.044337 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" event={"ID":"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9","Type":"ContainerStarted","Data":"d35740da80d7ce66d8b40776c8575dffbb862077c4759cf07d1d5985d5cafc14"} Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.046038 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" event={"ID":"78d7b0e4-55a7-45b8-a119-b4117c298f65","Type":"ContainerStarted","Data":"faa17a253f80e72a09427bdccc126bb8ef0d153071d0be9b62f701496cff73f8"} Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.055408 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.059973 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98083b85-e2b1-48e2-82f9-c71019aa2475-pod-info\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.060648 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-config-data\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.060965 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6jc8\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-kube-api-access-p6jc8\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.070301 4948 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.070524 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98083b85-e2b1-48e2-82f9-c71019aa2475-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.128746 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.145509 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.146876 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.152527 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.152641 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.152798 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.153050 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.153307 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.154096 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-bjbgp" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.157024 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.157464 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.330955 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331013 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331048 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331074 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331090 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331137 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8xlj\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-kube-api-access-d8xlj\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331172 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331189 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331220 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e243433b-5932-4d3d-a280-b7999d49e1ec-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331255 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.331276 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e243433b-5932-4d3d-a280-b7999d49e1ec-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.432852 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" 
(UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.432892 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.432945 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8xlj\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-kube-api-access-d8xlj\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.432969 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.432989 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.433035 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e243433b-5932-4d3d-a280-b7999d49e1ec-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.433050 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.433090 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e243433b-5932-4d3d-a280-b7999d49e1ec-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.433113 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.433161 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-config-data\") pod \"rabbitmq-cell1-server-0\" 
(UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.433191 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.433369 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.433972 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.435877 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.438177 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.445361 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.475407 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.489251 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e243433b-5932-4d3d-a280-b7999d49e1ec-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.514559 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e243433b-5932-4d3d-a280-b7999d49e1ec-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.515526 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.516821 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.521389 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8xlj\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-kube-api-access-d8xlj\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.540299 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") " pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.775364 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:03:52 crc kubenswrapper[4948]: I0120 20:03:52.907456 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.118789 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"98083b85-e2b1-48e2-82f9-c71019aa2475","Type":"ContainerStarted","Data":"cd508d06f03199662e24df331e8edb08892a44ca23579abf655daae83300a630"} Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.401810 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.432069 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.448212 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.466592 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-5ntt4" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.467537 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.472461 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.503949 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.561113 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.671915 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.672014 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/67ccceb8-ab3c-4304-9336-8938675a1012-config-data-generated\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.672172 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-operator-scripts\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.672609 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67ccceb8-ab3c-4304-9336-8938675a1012-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.672685 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9522\" (UniqueName: \"kubernetes.io/projected/67ccceb8-ab3c-4304-9336-8938675a1012-kube-api-access-t9522\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.672802 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ccceb8-ab3c-4304-9336-8938675a1012-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.672888 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-kolla-config\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.672941 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-config-data-default\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.771822 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.774235 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ccceb8-ab3c-4304-9336-8938675a1012-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.774341 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-kolla-config\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.774390 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-config-data-default\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.774452 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.774493 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/67ccceb8-ab3c-4304-9336-8938675a1012-config-data-generated\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.774545 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-operator-scripts\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.774635 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67ccceb8-ab3c-4304-9336-8938675a1012-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.774688 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9522\" 
(UniqueName: \"kubernetes.io/projected/67ccceb8-ab3c-4304-9336-8938675a1012-kube-api-access-t9522\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.783250 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.787898 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-config-data-default\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.813561 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-operator-scripts\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.818953 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/67ccceb8-ab3c-4304-9336-8938675a1012-config-data-generated\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.837666 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/67ccceb8-ab3c-4304-9336-8938675a1012-kolla-config\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.838902 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9522\" (UniqueName: \"kubernetes.io/projected/67ccceb8-ab3c-4304-9336-8938675a1012-kube-api-access-t9522\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.839318 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/67ccceb8-ab3c-4304-9336-8938675a1012-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.866085 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67ccceb8-ab3c-4304-9336-8938675a1012-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:53 crc kubenswrapper[4948]: I0120 20:03:53.990919 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-galera-0\" (UID: \"67ccceb8-ab3c-4304-9336-8938675a1012\") " pod="openstack/openstack-galera-0" Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 
20:03:54.065650 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 20:03:54.163050 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e243433b-5932-4d3d-a280-b7999d49e1ec","Type":"ContainerStarted","Data":"ff8946b701b6fa3b50707f6d57b561ed1d7b90562fae8aa23dbf396ecae63556"} Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 20:03:54.807039 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 20:03:54.808373 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 20:03:54.830451 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 20:03:54.830682 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-4hkc5" Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 20:03:54.830899 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 20:03:54.831026 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 20 20:03:54 crc kubenswrapper[4948]: I0120 20:03:54.838913 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:54.978831 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:54.979333 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/68260cc0-7bcb-4582-8154-60bbcdfbcf04-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:54.979369 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68260cc0-7bcb-4582-8154-60bbcdfbcf04-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:54.979411 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmc8k\" (UniqueName: \"kubernetes.io/projected/68260cc0-7bcb-4582-8154-60bbcdfbcf04-kube-api-access-kmc8k\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:54.979438 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-operator-scripts\") pod 
\"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:54.979517 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:54.979542 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/68260cc0-7bcb-4582-8154-60bbcdfbcf04-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:54.979566 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.086005 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/68260cc0-7bcb-4582-8154-60bbcdfbcf04-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.100882 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.103062 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68260cc0-7bcb-4582-8154-60bbcdfbcf04-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.103392 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmc8k\" (UniqueName: \"kubernetes.io/projected/68260cc0-7bcb-4582-8154-60bbcdfbcf04-kube-api-access-kmc8k\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.103425 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.103588 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.103640 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/68260cc0-7bcb-4582-8154-60bbcdfbcf04-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.103678 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.103749 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.104035 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.104151 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/68260cc0-7bcb-4582-8154-60bbcdfbcf04-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.106534 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.109345 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.109448 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.113354 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.124028 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/68260cc0-7bcb-4582-8154-60bbcdfbcf04-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.152504 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.152935 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.166497 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/68260cc0-7bcb-4582-8154-60bbcdfbcf04-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.167661 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.174048 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-qg4z2" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.182999 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68260cc0-7bcb-4582-8154-60bbcdfbcf04-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.191647 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.218338 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d6257c47-078f-4d41-942c-45d7e57b8c15-kolla-config\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.218387 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6257c47-078f-4d41-942c-45d7e57b8c15-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.218433 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/d6257c47-078f-4d41-942c-45d7e57b8c15-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.218455 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqbfn\" (UniqueName: \"kubernetes.io/projected/d6257c47-078f-4d41-942c-45d7e57b8c15-kube-api-access-dqbfn\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.218515 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d6257c47-078f-4d41-942c-45d7e57b8c15-config-data\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.225824 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmc8k\" (UniqueName: \"kubernetes.io/projected/68260cc0-7bcb-4582-8154-60bbcdfbcf04-kube-api-access-kmc8k\") pod \"openstack-cell1-galera-0\" (UID: \"68260cc0-7bcb-4582-8154-60bbcdfbcf04\") " pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.323012 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d6257c47-078f-4d41-942c-45d7e57b8c15-kolla-config\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.323054 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6257c47-078f-4d41-942c-45d7e57b8c15-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.323091 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6257c47-078f-4d41-942c-45d7e57b8c15-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.323113 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqbfn\" (UniqueName: \"kubernetes.io/projected/d6257c47-078f-4d41-942c-45d7e57b8c15-kube-api-access-dqbfn\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.323158 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d6257c47-078f-4d41-942c-45d7e57b8c15-config-data\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.323767 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d6257c47-078f-4d41-942c-45d7e57b8c15-kolla-config\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.328905 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6257c47-078f-4d41-942c-45d7e57b8c15-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.332107 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6257c47-078f-4d41-942c-45d7e57b8c15-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.332287 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d6257c47-078f-4d41-942c-45d7e57b8c15-config-data\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.463792 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqbfn\" (UniqueName: \"kubernetes.io/projected/d6257c47-078f-4d41-942c-45d7e57b8c15-kube-api-access-dqbfn\") pod \"memcached-0\" (UID: \"d6257c47-078f-4d41-942c-45d7e57b8c15\") " pod="openstack/memcached-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.479559 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 20 20:03:55 crc kubenswrapper[4948]: I0120 20:03:55.581795 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 20 20:03:56 crc kubenswrapper[4948]: I0120 20:03:56.211756 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"67ccceb8-ab3c-4304-9336-8938675a1012","Type":"ContainerStarted","Data":"b31bbf71a4f86d31d94ee617c086cbfcc074f064c9ee887b58de6d8ab4d079b4"} Jan 20 20:03:56 crc kubenswrapper[4948]: I0120 20:03:56.284273 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 20 20:03:56 crc kubenswrapper[4948]: I0120 20:03:56.665019 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.312466 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"68260cc0-7bcb-4582-8154-60bbcdfbcf04","Type":"ContainerStarted","Data":"b926a750a35c291523652d9594e972c1e4ec3ba5ee43bab6f820acc0a23a9b52"} Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.317394 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d6257c47-078f-4d41-942c-45d7e57b8c15","Type":"ContainerStarted","Data":"19aff260195df2f269d6aae87088fa10982274971ef6bdbb2cb04398ac6f5bc1"} Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.389782 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.391175 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.396450 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-v8v4h" Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.418254 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.540994 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdf85\" (UniqueName: \"kubernetes.io/projected/e7ede84b-9ae0-49a5-a694-acacdd4c1b95-kube-api-access-qdf85\") pod \"kube-state-metrics-0\" (UID: \"e7ede84b-9ae0-49a5-a694-acacdd4c1b95\") " pod="openstack/kube-state-metrics-0" Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.648715 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdf85\" (UniqueName: \"kubernetes.io/projected/e7ede84b-9ae0-49a5-a694-acacdd4c1b95-kube-api-access-qdf85\") pod \"kube-state-metrics-0\" (UID: \"e7ede84b-9ae0-49a5-a694-acacdd4c1b95\") " pod="openstack/kube-state-metrics-0" Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.685784 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdf85\" (UniqueName: \"kubernetes.io/projected/e7ede84b-9ae0-49a5-a694-acacdd4c1b95-kube-api-access-qdf85\") pod \"kube-state-metrics-0\" (UID: \"e7ede84b-9ae0-49a5-a694-acacdd4c1b95\") " pod="openstack/kube-state-metrics-0" Jan 20 20:03:57 crc kubenswrapper[4948]: I0120 20:03:57.735119 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 20:03:58 crc kubenswrapper[4948]: I0120 20:03:58.517817 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:03:58 crc kubenswrapper[4948]: W0120 20:03:58.532129 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7ede84b_9ae0_49a5_a694_acacdd4c1b95.slice/crio-8b8cb564068b7ecf0abf7b2a4334218fd50ef77c8124f5b0cc9815c61cfeef7e WatchSource:0}: Error finding container 8b8cb564068b7ecf0abf7b2a4334218fd50ef77c8124f5b0cc9815c61cfeef7e: Status 404 returned error can't find the container with id 8b8cb564068b7ecf0abf7b2a4334218fd50ef77c8124f5b0cc9815c61cfeef7e Jan 20 20:03:59 crc kubenswrapper[4948]: I0120 20:03:59.415302 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e7ede84b-9ae0-49a5-a694-acacdd4c1b95","Type":"ContainerStarted","Data":"8b8cb564068b7ecf0abf7b2a4334218fd50ef77c8124f5b0cc9815c61cfeef7e"} Jan 20 20:04:00 crc kubenswrapper[4948]: I0120 20:04:00.959302 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 20 20:04:00 crc kubenswrapper[4948]: I0120 20:04:00.961155 4948 util.go:30] "No sandbox for pod can be found. 
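
The single W-level entry above (manager.go:1169) comes from the cAdvisor manager embedded in the kubelet: an inotify watch event for the freshly created crio-8b8cb… cgroup slice arrives before the container is registered in the runtime cache, so the lookup returns 404. It is a routine startup race, and the container is picked up moments later, as the following PLEG ContainerStarted event for kube-state-metrics-0 shows. The pod UID is embedded in the slice name with dashes replaced by underscores; a hypothetical decoding helper:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// kubepods cgroup paths embed the pod UID (dashes -> underscores)
// and the container/sandbox ID, e.g.
//   .../kubepods-besteffort-pode7ede84b_9ae0_....slice/crio-8b8cb...
var sliceRE = regexp.MustCompile(`pod([0-9a-f_]+)\.slice/crio-([0-9a-f]+)`)

func main() {
	p := "/kubepods.slice/kubepods-besteffort.slice/" +
		"kubepods-besteffort-pode7ede84b_9ae0_49a5_a694_acacdd4c1b95.slice/" +
		"crio-8b8cb564068b7ecf0abf7b2a4334218fd50ef77c8124f5b0cc9815c61cfeef7e"
	m := sliceRE.FindStringSubmatch(p)
	if m == nil {
		return
	}
	fmt.Println("pod UID:  ", strings.ReplaceAll(m[1], "_", "-"))
	fmt.Println("container:", m[2])
}
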
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:00 crc kubenswrapper[4948]: I0120 20:04:00.964664 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 20 20:04:00 crc kubenswrapper[4948]: I0120 20:04:00.964905 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 20 20:04:00 crc kubenswrapper[4948]: I0120 20:04:00.965026 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-fts25" Jan 20 20:04:00 crc kubenswrapper[4948]: I0120 20:04:00.965033 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 20 20:04:00 crc kubenswrapper[4948]: I0120 20:04:00.965203 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 20 20:04:00 crc kubenswrapper[4948]: I0120 20:04:00.981568 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.144273 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.144319 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db2122b2-3a50-4587-944d-ca8aa51882ab-config\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.144346 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/db2122b2-3a50-4587-944d-ca8aa51882ab-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.144361 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/db2122b2-3a50-4587-944d-ca8aa51882ab-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.144382 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.144678 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws9qv\" (UniqueName: \"kubernetes.io/projected/db2122b2-3a50-4587-944d-ca8aa51882ab-kube-api-access-ws9qv\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.144837 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.145006 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.246784 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db2122b2-3a50-4587-944d-ca8aa51882ab-config\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.246857 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/db2122b2-3a50-4587-944d-ca8aa51882ab-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.247253 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/db2122b2-3a50-4587-944d-ca8aa51882ab-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.247428 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.247500 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws9qv\" (UniqueName: \"kubernetes.io/projected/db2122b2-3a50-4587-944d-ca8aa51882ab-kube-api-access-ws9qv\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.247517 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.247564 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.247608 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 
20:04:01.248020 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.249401 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db2122b2-3a50-4587-944d-ca8aa51882ab-config\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.251213 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/db2122b2-3a50-4587-944d-ca8aa51882ab-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.251316 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/db2122b2-3a50-4587-944d-ca8aa51882ab-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.257437 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.268667 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.278626 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db2122b2-3a50-4587-944d-ca8aa51882ab-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.282423 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws9qv\" (UniqueName: \"kubernetes.io/projected/db2122b2-3a50-4587-944d-ca8aa51882ab-kube-api-access-ws9qv\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.304557 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"db2122b2-3a50-4587-944d-ca8aa51882ab\") " pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.571165 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-hpg27"] Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.572573 4948 util.go:30] "No sandbox for pod can be found. 
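
For the local persistent volumes, the MountDevice step reports the PV's global device mount path: /mnt/openstack/pv12 for local-storage12-crc here, matching pv05 and pv09 for the two Galera pods earlier (and pv07 for ovsdbserver-sb-0 further down); MountVolume.SetUp then bind-mounts that directory into the pod's volume path. The name-to-path correspondence looks mechanical in this CRC environment; a sketch under that assumption, inferred purely from the four samples in this log and not from any Kubernetes rule:

package main

import (
	"fmt"
	"regexp"
)

// Observed convention: PV "local-storageNN-crc" is backed by host
// path "/mnt/openstack/pvNN" (an assumption from this log only).
var pvRE = regexp.MustCompile(`^local-storage(\d{2})-crc$`)

func deviceMountPath(pvName string) (string, bool) {
	m := pvRE.FindStringSubmatch(pvName)
	if m == nil {
		return "", false
	}
	return "/mnt/openstack/pv" + m[1], true
}

func main() {
	for _, pv := range []string{"local-storage05-crc", "local-storage12-crc"} {
		if p, ok := deviceMountPath(pv); ok {
			fmt.Println(pv, "->", p) // matches the MountDevice lines above
		}
	}
}
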
Need to start a new one" pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.578693 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.579085 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.586942 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-9h262" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.587231 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.609806 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hpg27"] Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.618215 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-dgkh9"] Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.620672 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.649127 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-dgkh9"] Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761003 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf6sm\" (UniqueName: \"kubernetes.io/projected/7e8635e1-cc17-4a2e-9b45-b76043df05d4-kube-api-access-nf6sm\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761119 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-run\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761518 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-run\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761589 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-etc-ovs\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761721 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46328967-e69a-4d46-86d6-ba1af248c8f2-combined-ca-bundle\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761760 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/configmap/46328967-e69a-4d46-86d6-ba1af248c8f2-scripts\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761854 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-log\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761935 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-lib\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.761987 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e8635e1-cc17-4a2e-9b45-b76043df05d4-scripts\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.762111 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-run-ovn\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.762248 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-log-ovn\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.762357 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/46328967-e69a-4d46-86d6-ba1af248c8f2-ovn-controller-tls-certs\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.762400 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t77cw\" (UniqueName: \"kubernetes.io/projected/46328967-e69a-4d46-86d6-ba1af248c8f2-kube-api-access-t77cw\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.863781 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-run\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.864467 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-run\") pod 
\"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.864615 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-etc-ovs\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.864800 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-etc-ovs\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.865008 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46328967-e69a-4d46-86d6-ba1af248c8f2-combined-ca-bundle\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.865116 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46328967-e69a-4d46-86d6-ba1af248c8f2-scripts\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.865354 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-log\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.865478 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-lib\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.865615 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e8635e1-cc17-4a2e-9b45-b76043df05d4-scripts\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.865770 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-log\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.867258 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/46328967-e69a-4d46-86d6-ba1af248c8f2-scripts\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.868110 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: 
\"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-lib\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.868880 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-run-ovn\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.869029 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-run-ovn\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.869262 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-log-ovn\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.869355 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t77cw\" (UniqueName: \"kubernetes.io/projected/46328967-e69a-4d46-86d6-ba1af248c8f2-kube-api-access-t77cw\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.869409 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/46328967-e69a-4d46-86d6-ba1af248c8f2-var-log-ovn\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.869428 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/46328967-e69a-4d46-86d6-ba1af248c8f2-ovn-controller-tls-certs\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.869516 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf6sm\" (UniqueName: \"kubernetes.io/projected/7e8635e1-cc17-4a2e-9b45-b76043df05d4-kube-api-access-nf6sm\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.869596 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-run\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.870366 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-run\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 
20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.874095 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/46328967-e69a-4d46-86d6-ba1af248c8f2-ovn-controller-tls-certs\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.874570 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46328967-e69a-4d46-86d6-ba1af248c8f2-combined-ca-bundle\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.878216 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e8635e1-cc17-4a2e-9b45-b76043df05d4-scripts\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.907843 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf6sm\" (UniqueName: \"kubernetes.io/projected/7e8635e1-cc17-4a2e-9b45-b76043df05d4-kube-api-access-nf6sm\") pod \"ovn-controller-ovs-dgkh9\" (UID: \"7e8635e1-cc17-4a2e-9b45-b76043df05d4\") " pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.908472 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t77cw\" (UniqueName: \"kubernetes.io/projected/46328967-e69a-4d46-86d6-ba1af248c8f2-kube-api-access-t77cw\") pod \"ovn-controller-hpg27\" (UID: \"46328967-e69a-4d46-86d6-ba1af248c8f2\") " pod="openstack/ovn-controller-hpg27" Jan 20 20:04:01 crc kubenswrapper[4948]: I0120 20:04:01.937994 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:04:02 crc kubenswrapper[4948]: I0120 20:04:02.200918 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hpg27" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.359475 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.362587 4948 util.go:30] "No sandbox for pod can be found. 
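
The two OVN controller pods differ from the database pods above: most of their volumes are kubernetes.io/host-path (var-run, var-lib, var-log and etc-ovs for ovn-controller-ovs-dgkh9; var-run, var-run-ovn and var-log-ovn for ovn-controller-hpg27), which map node directories straight into the pod, so VerifyControllerAttachedVolume and SetUp succeed back to back with no MountDevice step. The plugin type of every volume is the second segment of its UniqueName; a trivial illustration over names copied from the entries above:

package main

import (
	"fmt"
	"strings"
)

// The plugin is the second path segment of a UniqueName, e.g.
// kubernetes.io/host-path/<uid>-var-run -> "host-path".
func pluginOf(uniqueName string) string {
	parts := strings.SplitN(uniqueName, "/", 3)
	if len(parts) < 3 {
		return ""
	}
	return parts[1]
}

func main() {
	for _, n := range []string{
		"kubernetes.io/host-path/7e8635e1-cc17-4a2e-9b45-b76043df05d4-var-run",
		"kubernetes.io/configmap/7e8635e1-cc17-4a2e-9b45-b76043df05d4-scripts",
		"kubernetes.io/secret/46328967-e69a-4d46-86d6-ba1af248c8f2-ovn-controller-tls-certs",
		"kubernetes.io/projected/46328967-e69a-4d46-86d6-ba1af248c8f2-kube-api-access-t77cw",
	} {
		fmt.Println(pluginOf(n), "<-", n)
	}
}
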
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.366236 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.366900 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.367233 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.367688 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-44b2s" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.367869 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.471043 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/25b56954-2973-439d-a473-019d32e6ec0c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.471119 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.471166 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25b56954-2973-439d-a473-019d32e6ec0c-config\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.471199 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65npq\" (UniqueName: \"kubernetes.io/projected/25b56954-2973-439d-a473-019d32e6ec0c-kube-api-access-65npq\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.471267 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/25b56954-2973-439d-a473-019d32e6ec0c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.471343 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.471425 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: 
\"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.471487 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.573437 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.573510 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.573596 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.573618 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/25b56954-2973-439d-a473-019d32e6ec0c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.573657 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.573694 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25b56954-2973-439d-a473-019d32e6ec0c-config\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.573743 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65npq\" (UniqueName: \"kubernetes.io/projected/25b56954-2973-439d-a473-019d32e6ec0c-kube-api-access-65npq\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.573767 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/25b56954-2973-439d-a473-019d32e6ec0c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.575240 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/25b56954-2973-439d-a473-019d32e6ec0c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.575787 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.575896 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25b56954-2973-439d-a473-019d32e6ec0c-config\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.576235 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/25b56954-2973-439d-a473-019d32e6ec0c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.579127 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.580041 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.598041 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/25b56954-2973-439d-a473-019d32e6ec0c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.603028 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.604168 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65npq\" (UniqueName: \"kubernetes.io/projected/25b56954-2973-439d-a473-019d32e6ec0c-kube-api-access-65npq\") pod \"ovsdbserver-sb-0\" (UID: \"25b56954-2973-439d-a473-019d32e6ec0c\") " pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:04 crc kubenswrapper[4948]: I0120 20:04:04.684384 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:16 crc kubenswrapper[4948]: E0120 20:04:16.343252 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 20 20:04:16 crc kubenswrapper[4948]: E0120 20:04:16.343991 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d8xlj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(e243433b-5932-4d3d-a280-b7999d49e1ec): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:04:16 crc kubenswrapper[4948]: E0120 20:04:16.345192 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/rabbitmq-cell1-server-0" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" Jan 20 20:04:16 crc kubenswrapper[4948]: E0120 20:04:16.554488 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" Jan 20 20:04:18 crc kubenswrapper[4948]: E0120 20:04:18.386386 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 20 20:04:18 crc kubenswrapper[4948]: E0120 20:04:18.386949 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p6jc8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
rabbitmq-server-0_openstack(98083b85-e2b1-48e2-82f9-c71019aa2475): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:04:18 crc kubenswrapper[4948]: E0120 20:04:18.388168 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" Jan 20 20:04:18 crc kubenswrapper[4948]: E0120 20:04:18.566879 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" Jan 20 20:04:20 crc kubenswrapper[4948]: I0120 20:04:20.250602 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:04:20 crc kubenswrapper[4948]: I0120 20:04:20.250668 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:04:22 crc kubenswrapper[4948]: E0120 20:04:22.618715 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Jan 20 20:04:22 crc kubenswrapper[4948]: E0120 20:04:22.620145 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kmc8k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(68260cc0-7bcb-4582-8154-60bbcdfbcf04): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:04:22 crc kubenswrapper[4948]: E0120 20:04:22.621447 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="68260cc0-7bcb-4582-8154-60bbcdfbcf04" Jan 20 20:04:23 crc kubenswrapper[4948]: E0120 20:04:23.303058 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Jan 20 20:04:23 crc kubenswrapper[4948]: E0120 20:04:23.303290 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- 
/usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:nddh566h657h64ch5b7h5f8h568h558h57bh64dh654h59fh64ch56h654h658h57ch7fh665h596h65fh5fch9fh5f4h5d7h66dh67dh5f5h678h67h694h95q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dqbfn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(d6257c47-078f-4d41-942c-45d7e57b8c15): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:04:23 crc kubenswrapper[4948]: E0120 20:04:23.305062 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="d6257c47-078f-4d41-942c-45d7e57b8c15" Jan 20 20:04:23 crc kubenswrapper[4948]: E0120 20:04:23.607087 4948 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="d6257c47-078f-4d41-942c-45d7e57b8c15" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.235918 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.236585 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bkk7t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-jpn5n_openstack(1cfa9442-f2db-4649-945d-7c1133779d93): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.236061 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.236882 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq 
--interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6zv86,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-6dvz5_openstack(78d7b0e4-55a7-45b8-a119-b4117c298f65): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.238060 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" podUID="1cfa9442-f2db-4649-945d-7c1133779d93" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.240847 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" podUID="78d7b0e4-55a7-45b8-a119-b4117c298f65" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.263523 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.264179 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 
--log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zgc9t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-75wk2_openstack(0c3623e2-3568-42d3-ac5a-6f803601f092): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.266769 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" podUID="0c3623e2-3568-42d3-ac5a-6f803601f092" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.374877 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.375649 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5zwtn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-tnr9m_openstack(4253fee9-d31e-4dc7-a0fa-08d71e01c3e9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.378817 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" podUID="4253fee9-d31e-4dc7-a0fa-08d71e01c3e9" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.618379 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" podUID="4253fee9-d31e-4dc7-a0fa-08d71e01c3e9" Jan 20 20:04:24 crc kubenswrapper[4948]: E0120 20:04:24.618596 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" podUID="78d7b0e4-55a7-45b8-a119-b4117c298f65" Jan 20 20:04:24 crc kubenswrapper[4948]: I0120 20:04:24.845817 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hpg27"] Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.399664 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-dgkh9"] Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.558903 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.564337 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.622806 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" event={"ID":"1cfa9442-f2db-4649-945d-7c1133779d93","Type":"ContainerDied","Data":"e1ed00c21ad7ac71803e85cc95a4e7cf11cec71fd6640aeff928ad2ef00e4ae8"} Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.622849 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-jpn5n" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.626182 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dgkh9" event={"ID":"7e8635e1-cc17-4a2e-9b45-b76043df05d4","Type":"ContainerStarted","Data":"0880cd560a75431a72a5b7b1419ca475bda987a074f3163cbd18ca94dc8246ef"} Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.627330 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" event={"ID":"0c3623e2-3568-42d3-ac5a-6f803601f092","Type":"ContainerDied","Data":"8c5515952ebd52352fc1508f8fbe08c8d98476077b71777dba3d408968f4385b"} Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.627400 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-75wk2" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.631838 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"67ccceb8-ab3c-4304-9336-8938675a1012","Type":"ContainerStarted","Data":"00d1d447e1eb460ece84ccd3b2c070b35d02f835b0e98030b21a86a7d6394a2f"} Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.633947 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27" event={"ID":"46328967-e69a-4d46-86d6-ba1af248c8f2","Type":"ContainerStarted","Data":"2ad06342a6d157340d9b0cfe0c330ef9df0d95050214700cb3731132876d8eb4"} Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.638119 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"68260cc0-7bcb-4582-8154-60bbcdfbcf04","Type":"ContainerStarted","Data":"7f893cfcad6ddcbd3117e02f6ae206fe4e6fdc07b428990999498c61d8a258c2"} Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.679609 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-dns-svc\") pod \"1cfa9442-f2db-4649-945d-7c1133779d93\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.679746 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bkk7t\" (UniqueName: \"kubernetes.io/projected/1cfa9442-f2db-4649-945d-7c1133779d93-kube-api-access-bkk7t\") pod \"1cfa9442-f2db-4649-945d-7c1133779d93\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.679772 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgc9t\" (UniqueName: \"kubernetes.io/projected/0c3623e2-3568-42d3-ac5a-6f803601f092-kube-api-access-zgc9t\") pod \"0c3623e2-3568-42d3-ac5a-6f803601f092\" (UID: 
\"0c3623e2-3568-42d3-ac5a-6f803601f092\") " Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.679795 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-config\") pod \"1cfa9442-f2db-4649-945d-7c1133779d93\" (UID: \"1cfa9442-f2db-4649-945d-7c1133779d93\") " Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.679865 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3623e2-3568-42d3-ac5a-6f803601f092-config\") pod \"0c3623e2-3568-42d3-ac5a-6f803601f092\" (UID: \"0c3623e2-3568-42d3-ac5a-6f803601f092\") " Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.680564 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-config" (OuterVolumeSpecName: "config") pod "1cfa9442-f2db-4649-945d-7c1133779d93" (UID: "1cfa9442-f2db-4649-945d-7c1133779d93"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.681205 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c3623e2-3568-42d3-ac5a-6f803601f092-config" (OuterVolumeSpecName: "config") pod "0c3623e2-3568-42d3-ac5a-6f803601f092" (UID: "0c3623e2-3568-42d3-ac5a-6f803601f092"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.681901 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1cfa9442-f2db-4649-945d-7c1133779d93" (UID: "1cfa9442-f2db-4649-945d-7c1133779d93"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.690019 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c3623e2-3568-42d3-ac5a-6f803601f092-kube-api-access-zgc9t" (OuterVolumeSpecName: "kube-api-access-zgc9t") pod "0c3623e2-3568-42d3-ac5a-6f803601f092" (UID: "0c3623e2-3568-42d3-ac5a-6f803601f092"). InnerVolumeSpecName "kube-api-access-zgc9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.708125 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cfa9442-f2db-4649-945d-7c1133779d93-kube-api-access-bkk7t" (OuterVolumeSpecName: "kube-api-access-bkk7t") pod "1cfa9442-f2db-4649-945d-7c1133779d93" (UID: "1cfa9442-f2db-4649-945d-7c1133779d93"). InnerVolumeSpecName "kube-api-access-bkk7t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.783186 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.783246 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bkk7t\" (UniqueName: \"kubernetes.io/projected/1cfa9442-f2db-4649-945d-7c1133779d93-kube-api-access-bkk7t\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.783265 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgc9t\" (UniqueName: \"kubernetes.io/projected/0c3623e2-3568-42d3-ac5a-6f803601f092-kube-api-access-zgc9t\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.783278 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1cfa9442-f2db-4649-945d-7c1133779d93-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:25 crc kubenswrapper[4948]: I0120 20:04:25.783294 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c3623e2-3568-42d3-ac5a-6f803601f092-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.010484 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jpn5n"] Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.046794 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-jpn5n"] Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.075069 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-75wk2"] Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.091621 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-75wk2"] Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.109650 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.228114 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 20 20:04:26 crc kubenswrapper[4948]: W0120 20:04:26.303682 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb2122b2_3a50_4587_944d_ca8aa51882ab.slice/crio-ea483a8d849eba904498a6123c80c4c2a2a37f46b12ab6f545efe159be672bb5 WatchSource:0}: Error finding container ea483a8d849eba904498a6123c80c4c2a2a37f46b12ab6f545efe159be672bb5: Status 404 returned error can't find the container with id ea483a8d849eba904498a6123c80c4c2a2a37f46b12ab6f545efe159be672bb5 Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.581511 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c3623e2-3568-42d3-ac5a-6f803601f092" path="/var/lib/kubelet/pods/0c3623e2-3568-42d3-ac5a-6f803601f092/volumes" Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.582348 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cfa9442-f2db-4649-945d-7c1133779d93" path="/var/lib/kubelet/pods/1cfa9442-f2db-4649-945d-7c1133779d93/volumes" Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.648176 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"db2122b2-3a50-4587-944d-ca8aa51882ab","Type":"ContainerStarted","Data":"ea483a8d849eba904498a6123c80c4c2a2a37f46b12ab6f545efe159be672bb5"} Jan 20 20:04:26 crc kubenswrapper[4948]: I0120 20:04:26.651995 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"25b56954-2973-439d-a473-019d32e6ec0c","Type":"ContainerStarted","Data":"05437e93804e9fd909113446327a24cc39d23008d73cb386eeb1e0f06c83c2a0"} Jan 20 20:04:27 crc kubenswrapper[4948]: I0120 20:04:27.663465 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e7ede84b-9ae0-49a5-a694-acacdd4c1b95","Type":"ContainerStarted","Data":"4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e"} Jan 20 20:04:27 crc kubenswrapper[4948]: I0120 20:04:27.663847 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 20 20:04:27 crc kubenswrapper[4948]: I0120 20:04:27.688449 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.146986269 podStartE2EDuration="30.688426515s" podCreationTimestamp="2026-01-20 20:03:57 +0000 UTC" firstStartedPulling="2026-01-20 20:03:58.536280152 +0000 UTC m=+866.487005121" lastFinishedPulling="2026-01-20 20:04:27.077720398 +0000 UTC m=+895.028445367" observedRunningTime="2026-01-20 20:04:27.681085867 +0000 UTC m=+895.631810836" watchObservedRunningTime="2026-01-20 20:04:27.688426515 +0000 UTC m=+895.639151484" Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.692918 4948 generic.go:334] "Generic (PLEG): container finished" podID="67ccceb8-ab3c-4304-9336-8938675a1012" containerID="00d1d447e1eb460ece84ccd3b2c070b35d02f835b0e98030b21a86a7d6394a2f" exitCode=0 Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.692999 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"67ccceb8-ab3c-4304-9336-8938675a1012","Type":"ContainerDied","Data":"00d1d447e1eb460ece84ccd3b2c070b35d02f835b0e98030b21a86a7d6394a2f"} Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.696023 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27" event={"ID":"46328967-e69a-4d46-86d6-ba1af248c8f2","Type":"ContainerStarted","Data":"82b128b11d1aab6009a3769dca3029212070c196cee91bc0ee4d938eb3abb37a"} Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.697054 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-hpg27" Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.698876 4948 generic.go:334] "Generic (PLEG): container finished" podID="68260cc0-7bcb-4582-8154-60bbcdfbcf04" containerID="7f893cfcad6ddcbd3117e02f6ae206fe4e6fdc07b428990999498c61d8a258c2" exitCode=0 Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.699013 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"68260cc0-7bcb-4582-8154-60bbcdfbcf04","Type":"ContainerDied","Data":"7f893cfcad6ddcbd3117e02f6ae206fe4e6fdc07b428990999498c61d8a258c2"} Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.717862 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dgkh9" event={"ID":"7e8635e1-cc17-4a2e-9b45-b76043df05d4","Type":"ContainerStarted","Data":"c4b4e6faa0a611924287bbac17ec8467b654a67d7fb54cdfac6553a64c5d90ad"} Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.724080 4948 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"db2122b2-3a50-4587-944d-ca8aa51882ab","Type":"ContainerStarted","Data":"008f75fb0d0f45a9dbb49c966535b079f43900652432785a72ad4e27b19e64ec"}
Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.725585 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"25b56954-2973-439d-a473-019d32e6ec0c","Type":"ContainerStarted","Data":"47fb6c7faf1ac6b0a04f1a9354f6315c3dd2b8ebb390ecdaa7704e6e52e82bb4"}
Jan 20 20:04:30 crc kubenswrapper[4948]: I0120 20:04:30.792103 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-hpg27" podStartSLOduration=25.034426665 podStartE2EDuration="29.792082342s" podCreationTimestamp="2026-01-20 20:04:01 +0000 UTC" firstStartedPulling="2026-01-20 20:04:25.477442801 +0000 UTC m=+893.428167770" lastFinishedPulling="2026-01-20 20:04:30.235098478 +0000 UTC m=+898.185823447" observedRunningTime="2026-01-20 20:04:30.783944711 +0000 UTC m=+898.734669680" watchObservedRunningTime="2026-01-20 20:04:30.792082342 +0000 UTC m=+898.742807311"
Jan 20 20:04:31 crc kubenswrapper[4948]: I0120 20:04:31.737233 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"67ccceb8-ab3c-4304-9336-8938675a1012","Type":"ContainerStarted","Data":"297730d09f800f90cc7ea7cd174a19a216f421b7460e4c9ad2aba5c4eee420a7"}
Jan 20 20:04:31 crc kubenswrapper[4948]: I0120 20:04:31.740441 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"68260cc0-7bcb-4582-8154-60bbcdfbcf04","Type":"ContainerStarted","Data":"c3c6891906629a1c05cf4106b2114ce90f625fcbe1c7b10c266f7413979d3412"}
Jan 20 20:04:31 crc kubenswrapper[4948]: I0120 20:04:31.743924 4948 generic.go:334] "Generic (PLEG): container finished" podID="7e8635e1-cc17-4a2e-9b45-b76043df05d4" containerID="c4b4e6faa0a611924287bbac17ec8467b654a67d7fb54cdfac6553a64c5d90ad" exitCode=0
Jan 20 20:04:31 crc kubenswrapper[4948]: I0120 20:04:31.745252 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dgkh9" event={"ID":"7e8635e1-cc17-4a2e-9b45-b76043df05d4","Type":"ContainerDied","Data":"c4b4e6faa0a611924287bbac17ec8467b654a67d7fb54cdfac6553a64c5d90ad"}
Jan 20 20:04:31 crc kubenswrapper[4948]: I0120 20:04:31.785461 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371998.069338 podStartE2EDuration="38.785438549s" podCreationTimestamp="2026-01-20 20:03:53 +0000 UTC" firstStartedPulling="2026-01-20 20:03:56.321888732 +0000 UTC m=+864.272613701" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:04:31.785266144 +0000 UTC m=+899.735991113" watchObservedRunningTime="2026-01-20 20:04:31.785438549 +0000 UTC m=+899.736163518"
Jan 20 20:04:31 crc kubenswrapper[4948]: I0120 20:04:31.785728 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=10.938353407 podStartE2EDuration="39.785697307s" podCreationTimestamp="2026-01-20 20:03:52 +0000 UTC" firstStartedPulling="2026-01-20 20:03:55.280842582 +0000 UTC m=+863.231567541" lastFinishedPulling="2026-01-20 20:04:24.128186472 +0000 UTC m=+892.078911441" observedRunningTime="2026-01-20 20:04:31.759578496 +0000 UTC m=+899.710303465" watchObservedRunningTime="2026-01-20 20:04:31.785697307 +0000 UTC m=+899.736422276"
Jan 20 20:04:32 crc kubenswrapper[4948]: I0120 20:04:32.756534 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e243433b-5932-4d3d-a280-b7999d49e1ec","Type":"ContainerStarted","Data":"eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce"}
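
The podStartSLOduration=-9223371998.069338 reported for openstack-cell1-galera-0 just above looks like saturated duration arithmetic rather than a real measurement: lastFinishedPulling is the zero time, so the pull interval overflows Go's time.Duration and time.Time.Sub clamps it to the minimum duration (about -9223372036.85s); subtracting that clamped value from the 38.785s end-to-end duration then wraps around int64 and lands on the logged number. The Go sketch below reproduces the arithmetic, assuming the tracker computes the SLO duration as (observedRunningTime - podCreationTimestamp) - (lastFinishedPulling - firstStartedPulling); that formula is consistent with the healthy kube-state-metrics-0 entry earlier in the log (30.688s E2E minus 28.541s of pulling = 2.147s).

package main

import (
    "fmt"
    "time"
)

func main() {
    // Values taken from the openstack-cell1-galera-0 log line above.
    created := time.Date(2026, 1, 20, 20, 3, 53, 0, time.UTC)
    running := created.Add(38785438549 * time.Nanosecond) // E2E = 38.785438549s
    firstPull := time.Date(2026, 1, 20, 20, 3, 56, 321888732, time.UTC)
    var lastPull time.Time // zero value: 0001-01-01 00:00:00 +0000 UTC, as logged

    e2e := running.Sub(created)
    // lastPull is ~2025 years before firstPull, far outside time.Duration's
    // range, so Sub saturates at the minimum Duration (math.MinInt64 ns).
    pulling := lastPull.Sub(firstPull)
    slo := e2e - pulling // plain int64 subtraction, which wraps around

    fmt.Println(e2e.Seconds())     // 38.785438549
    fmt.Println(pulling.Seconds()) // ≈ -9.223372036854776e+09
    fmt.Println(slo.Seconds())     // ≈ -9.223371998069338e+09, as in the log
}
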
Jan 20 20:04:32 crc kubenswrapper[4948]: I0120 20:04:32.760588 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"98083b85-e2b1-48e2-82f9-c71019aa2475","Type":"ContainerStarted","Data":"88ea89f84b7617f501ddbb4b9afb6561e4fd047f7d7e5577d0b84b4bdbfe0e71"}
Jan 20 20:04:32 crc kubenswrapper[4948]: I0120 20:04:32.765399 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dgkh9" event={"ID":"7e8635e1-cc17-4a2e-9b45-b76043df05d4","Type":"ContainerStarted","Data":"f74a692dfe2a2f26c99fa54442cd08f788d9087faab855d983684842e1303bc2"}
Jan 20 20:04:32 crc kubenswrapper[4948]: I0120 20:04:32.765450 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dgkh9" event={"ID":"7e8635e1-cc17-4a2e-9b45-b76043df05d4","Type":"ContainerStarted","Data":"8152b1221bbb617bba83a42b67a1e6f4e2cf61fafaf3c5ed2f28fae429d603b2"}
Jan 20 20:04:32 crc kubenswrapper[4948]: I0120 20:04:32.765775 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-dgkh9"
Jan 20 20:04:32 crc kubenswrapper[4948]: I0120 20:04:32.843784 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-dgkh9" podStartSLOduration=27.115022792 podStartE2EDuration="31.843763299s" podCreationTimestamp="2026-01-20 20:04:01 +0000 UTC" firstStartedPulling="2026-01-20 20:04:25.473270693 +0000 UTC m=+893.423995662" lastFinishedPulling="2026-01-20 20:04:30.2020112 +0000 UTC m=+898.152736169" observedRunningTime="2026-01-20 20:04:32.836253936 +0000 UTC m=+900.786978925" watchObservedRunningTime="2026-01-20 20:04:32.843763299 +0000 UTC m=+900.794488268"
Jan 20 20:04:33 crc kubenswrapper[4948]: I0120 20:04:33.776726 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-dgkh9"
Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.066260 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.066338 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.704734 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-p8b7f"]
Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.706633 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.738674 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p8b7f"] Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.862764 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5b4h\" (UniqueName: \"kubernetes.io/projected/896974b3-7b54-41b4-985e-9bfa9849f260-kube-api-access-z5b4h\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.863095 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-catalog-content\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.863251 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-utilities\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.964634 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5b4h\" (UniqueName: \"kubernetes.io/projected/896974b3-7b54-41b4-985e-9bfa9849f260-kube-api-access-z5b4h\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.964688 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-catalog-content\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.964833 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-utilities\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.965347 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-utilities\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.965875 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-catalog-content\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:34 crc kubenswrapper[4948]: I0120 20:04:34.994602 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-z5b4h\" (UniqueName: \"kubernetes.io/projected/896974b3-7b54-41b4-985e-9bfa9849f260-kube-api-access-z5b4h\") pod \"redhat-operators-p8b7f\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.029858 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.434910 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-g8dbf"] Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.438423 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.445430 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.454323 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-g8dbf"] Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.480659 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.481868 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.603022 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3bdd9991-773b-4709-a6e1-426c1fc89d23-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.603094 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bdd9991-773b-4709-a6e1-426c1fc89d23-combined-ca-bundle\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.603361 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bdd9991-773b-4709-a6e1-426c1fc89d23-config\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.603486 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/3bdd9991-773b-4709-a6e1-426c1fc89d23-ovs-rundir\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.603685 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b29gz\" (UniqueName: \"kubernetes.io/projected/3bdd9991-773b-4709-a6e1-426c1fc89d23-kube-api-access-b29gz\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 
crc kubenswrapper[4948]: I0120 20:04:35.603952 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/3bdd9991-773b-4709-a6e1-426c1fc89d23-ovn-rundir\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.643995 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6dvz5"] Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.675125 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-vw2t4"] Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.676773 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.688193 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.706174 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bdd9991-773b-4709-a6e1-426c1fc89d23-combined-ca-bundle\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.706586 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3bdd9991-773b-4709-a6e1-426c1fc89d23-config\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.706730 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/3bdd9991-773b-4709-a6e1-426c1fc89d23-ovs-rundir\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.706910 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b29gz\" (UniqueName: \"kubernetes.io/projected/3bdd9991-773b-4709-a6e1-426c1fc89d23-kube-api-access-b29gz\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.707035 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/3bdd9991-773b-4709-a6e1-426c1fc89d23-ovn-rundir\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.707190 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3bdd9991-773b-4709-a6e1-426c1fc89d23-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.708645 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/3bdd9991-773b-4709-a6e1-426c1fc89d23-config\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.709171 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/3bdd9991-773b-4709-a6e1-426c1fc89d23-ovs-rundir\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.709933 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/3bdd9991-773b-4709-a6e1-426c1fc89d23-ovn-rundir\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.719245 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-vw2t4"] Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.727094 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bdd9991-773b-4709-a6e1-426c1fc89d23-combined-ca-bundle\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.739602 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3bdd9991-773b-4709-a6e1-426c1fc89d23-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.802386 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b29gz\" (UniqueName: \"kubernetes.io/projected/3bdd9991-773b-4709-a6e1-426c1fc89d23-kube-api-access-b29gz\") pod \"ovn-controller-metrics-g8dbf\" (UID: \"3bdd9991-773b-4709-a6e1-426c1fc89d23\") " pod="openstack/ovn-controller-metrics-g8dbf" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.809805 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.809910 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-config\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.809976 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.810020 4948 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tb8mn\" (UniqueName: \"kubernetes.io/projected/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-kube-api-access-tb8mn\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.911577 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.911636 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-config\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.911657 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.911682 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tb8mn\" (UniqueName: \"kubernetes.io/projected/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-kube-api-access-tb8mn\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.912859 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.913404 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.913493 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-config\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:35 crc kubenswrapper[4948]: I0120 20:04:35.957566 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tb8mn\" (UniqueName: \"kubernetes.io/projected/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-kube-api-access-tb8mn\") pod \"dnsmasq-dns-7f896c8c65-vw2t4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
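Each volume in the mount sequences above moves through the same three reconciler phases: operationExecutor.VerifyControllerAttachedVolume started, then operationExecutor.MountVolume started, then MountVolume.SetUp succeeded. A rough Go sketch of a scanner that reads a log like this one on stdin and reports the last phase each volume reached; the markers and the regex for the escaped-quote volume names are editorial assumptions, not kubelet code:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
)

func main() {
	// Most specific marker first, since every line also contains the word "volume".
	phases := []struct{ marker, name string }{
		{"MountVolume.SetUp succeeded for volume", "mounted"},
		{"operationExecutor.MountVolume started for volume", "mounting"},
		{"operationExecutor.VerifyControllerAttachedVolume started for volume", "attached"},
	}
	// Volume names appear as \"name\" inside the quoted klog message.
	vol := regexp.MustCompile(`volume \\?"([^"\\]+)\\?"`)
	state := map[string]string{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // log lines can be very long
	for sc.Scan() {
		line := sc.Text()
		for _, p := range phases {
			if strings.Contains(line, p.marker) {
				if m := vol.FindStringSubmatch(line); m != nil {
					state[m[1]] = p.name
				}
				break
			}
		}
	}
	for v, s := range state {
		fmt.Printf("%-30s %s\n", v, s)
	}
}

Run as, say, go run mountphase.go < kubelet.log (file name hypothetical); for the dnsmasq-dns-7f896c8c65-vw2t4 sequence above, all four volumes end in the mounted state.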
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.063146 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-g8dbf"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.103317 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.123451 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tnr9m"]
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.253786 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-4ckg7"]
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.349565 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.452996 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.537045 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-4ckg7"]
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.543859 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gd64\" (UniqueName: \"kubernetes.io/projected/eacc8f3b-677c-4e7c-b507-a885147a2448-kube-api-access-9gd64\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.543916 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.543959 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-config\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.543979 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.544013 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7"
Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.645874 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gd64\" (UniqueName: \"kubernetes.io/projected/eacc8f3b-677c-4e7c-b507-a885147a2448-kube-api-access-9gd64\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:36 crc
kubenswrapper[4948]: I0120 20:04:36.645970 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.646015 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.646038 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-config\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.646131 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.648445 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.648879 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-config\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.649047 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:36 crc kubenswrapper[4948]: I0120 20:04:36.699561 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gd64\" (UniqueName: \"kubernetes.io/projected/eacc8f3b-677c-4e7c-b507-a885147a2448-kube-api-access-9gd64\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:37 crc kubenswrapper[4948]: I0120 20:04:37.026624 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-4ckg7\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:37 crc kubenswrapper[4948]: I0120 20:04:37.137335 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:37 crc kubenswrapper[4948]: I0120 20:04:37.741459 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.046252 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.153550 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.738520 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.791222 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.871672 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-dns-svc\") pod \"78d7b0e4-55a7-45b8-a119-b4117c298f65\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.872746 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-config\") pod \"78d7b0e4-55a7-45b8-a119-b4117c298f65\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.872775 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zv86\" (UniqueName: \"kubernetes.io/projected/78d7b0e4-55a7-45b8-a119-b4117c298f65-kube-api-access-6zv86\") pod \"78d7b0e4-55a7-45b8-a119-b4117c298f65\" (UID: \"78d7b0e4-55a7-45b8-a119-b4117c298f65\") " Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.872476 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "78d7b0e4-55a7-45b8-a119-b4117c298f65" (UID: "78d7b0e4-55a7-45b8-a119-b4117c298f65"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.874088 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-config" (OuterVolumeSpecName: "config") pod "78d7b0e4-55a7-45b8-a119-b4117c298f65" (UID: "78d7b0e4-55a7-45b8-a119-b4117c298f65"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.874328 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-config\") on node \"crc\" DevicePath \"\""
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.874374 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/78d7b0e4-55a7-45b8-a119-b4117c298f65-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.884394 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78d7b0e4-55a7-45b8-a119-b4117c298f65-kube-api-access-6zv86" (OuterVolumeSpecName: "kube-api-access-6zv86") pod "78d7b0e4-55a7-45b8-a119-b4117c298f65" (UID: "78d7b0e4-55a7-45b8-a119-b4117c298f65"). InnerVolumeSpecName "kube-api-access-6zv86". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.940144 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-tnr9m" event={"ID":"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9","Type":"ContainerDied","Data":"d35740da80d7ce66d8b40776c8575dffbb862077c4759cf07d1d5985d5cafc14"}
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.940183 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-tnr9m"
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.941311 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5" event={"ID":"78d7b0e4-55a7-45b8-a119-b4117c298f65","Type":"ContainerDied","Data":"faa17a253f80e72a09427bdccc126bb8ef0d153071d0be9b62f701496cff73f8"}
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.941510 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-6dvz5"
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.985232 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-dns-svc\") pod \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") "
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.985348 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-config\") pod \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") "
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.985403 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zwtn\" (UniqueName: \"kubernetes.io/projected/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-kube-api-access-5zwtn\") pod \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\" (UID: \"4253fee9-d31e-4dc7-a0fa-08d71e01c3e9\") "
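The teardown records above mirror the mount flow in reverse: operationExecutor.UnmountVolume started, then UnmountVolume.TearDown succeeded, then "Volume detached ... DevicePath \"\"", and finally the "Cleaned up orphaned pod volumes dir" record that follows below once every volume of the pod UID is gone. A hedged Go sketch that pairs unmount starts with their detach records and flags volumes that never finished; the regexes are editorial assumptions about the escaped-quote message form, not kubelet code:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Track by UniqueName, which is stable across both record types.
	started := regexp.MustCompile(`operationExecutor\.UnmountVolume started for volume \\"[^"\\]+\\" \(UniqueName: \\"([^"\\]+)\\"\)`)
	detached := regexp.MustCompile(`Volume detached for volume \\"[^"\\]+\\" \(UniqueName: \\"([^"\\]+)\\"\)`)
	pending := map[string]bool{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		if m := started.FindStringSubmatch(sc.Text()); m != nil {
			pending[m[1]] = true
		} else if m := detached.FindStringSubmatch(sc.Text()); m != nil {
			delete(pending, m[1])
		}
	}
	for v := range pending {
		fmt.Println("unmount started but no detach record:", v)
	}
}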
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.987833 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4253fee9-d31e-4dc7-a0fa-08d71e01c3e9" (UID: "4253fee9-d31e-4dc7-a0fa-08d71e01c3e9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.988282 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-config" (OuterVolumeSpecName: "config") pod "4253fee9-d31e-4dc7-a0fa-08d71e01c3e9" (UID: "4253fee9-d31e-4dc7-a0fa-08d71e01c3e9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.989284 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-config\") on node \"crc\" DevicePath \"\""
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.989305 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zv86\" (UniqueName: \"kubernetes.io/projected/78d7b0e4-55a7-45b8-a119-b4117c298f65-kube-api-access-6zv86\") on node \"crc\" DevicePath \"\""
Jan 20 20:04:39 crc kubenswrapper[4948]: I0120 20:04:39.989315 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:39.999339 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-kube-api-access-5zwtn" (OuterVolumeSpecName: "kube-api-access-5zwtn") pod "4253fee9-d31e-4dc7-a0fa-08d71e01c3e9" (UID: "4253fee9-d31e-4dc7-a0fa-08d71e01c3e9"). InnerVolumeSpecName "kube-api-access-5zwtn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.073780 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6dvz5"]
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.086900 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-6dvz5"]
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.091502 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zwtn\" (UniqueName: \"kubernetes.io/projected/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9-kube-api-access-5zwtn\") on node \"crc\" DevicePath \"\""
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.097543 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-p8b7f"]
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.350025 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tnr9m"]
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.365659 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-tnr9m"]
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.387785 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-vw2t4"]
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.414023 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-g8dbf"]
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.427047 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-4ckg7"]
Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.582026 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4253fee9-d31e-4dc7-a0fa-08d71e01c3e9"
path="/var/lib/kubelet/pods/4253fee9-d31e-4dc7-a0fa-08d71e01c3e9/volumes" Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.584394 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78d7b0e4-55a7-45b8-a119-b4117c298f65" path="/var/lib/kubelet/pods/78d7b0e4-55a7-45b8-a119-b4117c298f65/volumes" Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.953690 4948 generic.go:334] "Generic (PLEG): container finished" podID="896974b3-7b54-41b4-985e-9bfa9849f260" containerID="99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2" exitCode=0 Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.953816 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8b7f" event={"ID":"896974b3-7b54-41b4-985e-9bfa9849f260","Type":"ContainerDied","Data":"99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2"} Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.953877 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8b7f" event={"ID":"896974b3-7b54-41b4-985e-9bfa9849f260","Type":"ContainerStarted","Data":"0d87a4c0739f4110cda46611883a552739c9cabccdf123bdac9ed62fe68eb4bd"} Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.956629 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" event={"ID":"eacc8f3b-677c-4e7c-b507-a885147a2448","Type":"ContainerStarted","Data":"b5d1051970d2eba069ac2261886125692d7caa4cfc7f98f93424ec2b4bf32ccf"} Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.960082 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d6257c47-078f-4d41-942c-45d7e57b8c15","Type":"ContainerStarted","Data":"a0a1a9f58fdd6a3419cee22c8a9213b4d77df3156aa853a9e3c5a77595b08b3e"} Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.960356 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.961886 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" event={"ID":"7c115fd8-7c9c-49b9-abbb-295caa3a90e4","Type":"ContainerStarted","Data":"466fe68d07e7193f6506ad2fd6e46973bd2410c34ac14fef8a82d5d9c7b6ae09"} Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.964556 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"25b56954-2973-439d-a473-019d32e6ec0c","Type":"ContainerStarted","Data":"cd7201bb56eced8a9b8101c3af57cd34fd8238841ee6d0424e97d92327fb35c2"} Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.966193 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-g8dbf" event={"ID":"3bdd9991-773b-4709-a6e1-426c1fc89d23","Type":"ContainerStarted","Data":"baaad138351310803b9ce29593c76f8354eb0d01bfb94bfda1a0a58e16729fbd"} Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.966241 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-g8dbf" event={"ID":"3bdd9991-773b-4709-a6e1-426c1fc89d23","Type":"ContainerStarted","Data":"b1aebc7333325631d55b6892a5b16681d84ccdce3086029f0f62fcb502961c2d"} Jan 20 20:04:40 crc kubenswrapper[4948]: I0120 20:04:40.968870 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"db2122b2-3a50-4587-944d-ca8aa51882ab","Type":"ContainerStarted","Data":"7b4306000a98c754bf94e0ff5de3bf0190a3db3b6a2b49e5a75a24b03c4b5dd6"} Jan 20 
20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.018227 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=24.830838363 podStartE2EDuration="38.018197031s" podCreationTimestamp="2026-01-20 20:04:03 +0000 UTC" firstStartedPulling="2026-01-20 20:04:26.508981951 +0000 UTC m=+894.459706920" lastFinishedPulling="2026-01-20 20:04:39.696340619 +0000 UTC m=+907.647065588" observedRunningTime="2026-01-20 20:04:41.015795253 +0000 UTC m=+908.966520242" watchObservedRunningTime="2026-01-20 20:04:41.018197031 +0000 UTC m=+908.968922000"
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.040736 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=3.071285957 podStartE2EDuration="46.040683529s" podCreationTimestamp="2026-01-20 20:03:55 +0000 UTC" firstStartedPulling="2026-01-20 20:03:56.726922847 +0000 UTC m=+864.677647816" lastFinishedPulling="2026-01-20 20:04:39.696320419 +0000 UTC m=+907.647045388" observedRunningTime="2026-01-20 20:04:41.03860617 +0000 UTC m=+908.989331129" watchObservedRunningTime="2026-01-20 20:04:41.040683529 +0000 UTC m=+908.991408518"
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.070618 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=28.663042554 podStartE2EDuration="42.070593897s" podCreationTimestamp="2026-01-20 20:03:59 +0000 UTC" firstStartedPulling="2026-01-20 20:04:26.306827868 +0000 UTC m=+894.257552827" lastFinishedPulling="2026-01-20 20:04:39.714379201 +0000 UTC m=+907.665104170" observedRunningTime="2026-01-20 20:04:41.067140019 +0000 UTC m=+909.017864988" watchObservedRunningTime="2026-01-20 20:04:41.070593897 +0000 UTC m=+909.021318876"
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.095080 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-g8dbf" podStartSLOduration=6.095050771 podStartE2EDuration="6.095050771s" podCreationTimestamp="2026-01-20 20:04:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:04:41.092179709 +0000 UTC m=+909.042904678" watchObservedRunningTime="2026-01-20 20:04:41.095050771 +0000 UTC m=+909.045775740"
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.588379 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.621533 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.696127 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.978242 4948 generic.go:334] "Generic (PLEG): container finished" podID="eacc8f3b-677c-4e7c-b507-a885147a2448" containerID="e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230" exitCode=0
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.978350 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" event={"ID":"eacc8f3b-677c-4e7c-b507-a885147a2448","Type":"ContainerDied","Data":"e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230"}
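The four startup-duration records above show both edge cases of the same arithmetic. For memcached-0, nearly the whole 46.040683529s end-to-end duration was image pulling: 46.040683529 - 3.071285957 = 42.969397572s, exactly lastFinishedPulling minus firstStartedPulling. For ovn-controller-metrics-g8dbf both pull timestamps are the zero time (0001-01-01 00:00:00), i.e. no pull was observed, so podStartSLOduration equals podStartE2EDuration at 6.095050771s. A small Go sketch extracting the fields from such a record (the line below is abbreviated from the memcached-0 entry; the regex is an editorial assumption, not kubelet code):

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"time"
)

func main() {
	// Abbreviated from the pod_startup_latency_tracker.go:104 record above.
	line := `pod="openstack/memcached-0" podStartSLOduration=3.071285957 podStartE2EDuration="46.040683529s"`
	re := regexp.MustCompile(`pod="([^"]+)" podStartSLOduration=([0-9.]+) podStartE2EDuration="([^"]+)"`)
	m := re.FindStringSubmatch(line)
	if m == nil {
		return
	}
	slo, _ := strconv.ParseFloat(m[2], 64)
	e2e, _ := time.ParseDuration(m[3])
	// The SLO clock ignores the pull window, so the difference is pull time.
	fmt.Printf("%s: %.9fs of %.9fs spent pulling images\n", m[1], e2e.Seconds()-slo, e2e.Seconds())
}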
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.980533 4948 generic.go:334] "Generic (PLEG): container finished" podID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" containerID="93927fc8df332fcc65c30ab9717117a81426e98decb20d77d75bc00035db8d96" exitCode=0
Jan 20 20:04:41 crc kubenswrapper[4948]: I0120 20:04:41.981034 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" event={"ID":"7c115fd8-7c9c-49b9-abbb-295caa3a90e4","Type":"ContainerDied","Data":"93927fc8df332fcc65c30ab9717117a81426e98decb20d77d75bc00035db8d96"}
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.486803 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-8lchs"]
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.488368 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8lchs"
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.490424 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret"
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.500906 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8lchs"]
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.558938 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rg2z\" (UniqueName: \"kubernetes.io/projected/acd6e216-4534-4c7a-ab49-94213536db2c-kube-api-access-4rg2z\") pod \"root-account-create-update-8lchs\" (UID: \"acd6e216-4534-4c7a-ab49-94213536db2c\") " pod="openstack/root-account-create-update-8lchs"
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.559058 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acd6e216-4534-4c7a-ab49-94213536db2c-operator-scripts\") pod \"root-account-create-update-8lchs\" (UID: \"acd6e216-4534-4c7a-ab49-94213536db2c\") " pod="openstack/root-account-create-update-8lchs"
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.660615 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rg2z\" (UniqueName: \"kubernetes.io/projected/acd6e216-4534-4c7a-ab49-94213536db2c-kube-api-access-4rg2z\") pod \"root-account-create-update-8lchs\" (UID: \"acd6e216-4534-4c7a-ab49-94213536db2c\") " pod="openstack/root-account-create-update-8lchs"
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.660738 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acd6e216-4534-4c7a-ab49-94213536db2c-operator-scripts\") pod \"root-account-create-update-8lchs\" (UID: \"acd6e216-4534-4c7a-ab49-94213536db2c\") " pod="openstack/root-account-create-update-8lchs"
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.661828 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acd6e216-4534-4c7a-ab49-94213536db2c-operator-scripts\") pod \"root-account-create-update-8lchs\" (UID: \"acd6e216-4534-4c7a-ab49-94213536db2c\") " pod="openstack/root-account-create-update-8lchs"
Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.680535 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rg2z\" (UniqueName: \"kubernetes.io/projected/acd6e216-4534-4c7a-ab49-94213536db2c-kube-api-access-4rg2z\") pod
\"root-account-create-update-8lchs\" (UID: \"acd6e216-4534-4c7a-ab49-94213536db2c\") " pod="openstack/root-account-create-update-8lchs" Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.812289 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8lchs" Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.990229 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8b7f" event={"ID":"896974b3-7b54-41b4-985e-9bfa9849f260","Type":"ContainerStarted","Data":"c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49"} Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.995949 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" event={"ID":"eacc8f3b-677c-4e7c-b507-a885147a2448","Type":"ContainerStarted","Data":"e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5"} Jan 20 20:04:42 crc kubenswrapper[4948]: I0120 20:04:42.996867 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:43 crc kubenswrapper[4948]: I0120 20:04:43.019834 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" event={"ID":"7c115fd8-7c9c-49b9-abbb-295caa3a90e4","Type":"ContainerStarted","Data":"88a329c47bc849d9d81ee64dc2e15e150fd046950685f7f623a9a05450901737"} Jan 20 20:04:43 crc kubenswrapper[4948]: I0120 20:04:43.046292 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" podStartSLOduration=6.242972812 podStartE2EDuration="7.046271079s" podCreationTimestamp="2026-01-20 20:04:36 +0000 UTC" firstStartedPulling="2026-01-20 20:04:40.400392214 +0000 UTC m=+908.351117183" lastFinishedPulling="2026-01-20 20:04:41.203690481 +0000 UTC m=+909.154415450" observedRunningTime="2026-01-20 20:04:43.041130033 +0000 UTC m=+910.991855002" watchObservedRunningTime="2026-01-20 20:04:43.046271079 +0000 UTC m=+910.996996048" Jan 20 20:04:43 crc kubenswrapper[4948]: I0120 20:04:43.067519 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" podStartSLOduration=7.451586487 podStartE2EDuration="8.067497861s" podCreationTimestamp="2026-01-20 20:04:35 +0000 UTC" firstStartedPulling="2026-01-20 20:04:40.382846276 +0000 UTC m=+908.333571245" lastFinishedPulling="2026-01-20 20:04:40.99875765 +0000 UTC m=+908.949482619" observedRunningTime="2026-01-20 20:04:43.067056549 +0000 UTC m=+911.017781548" watchObservedRunningTime="2026-01-20 20:04:43.067497861 +0000 UTC m=+911.018222830" Jan 20 20:04:43 crc kubenswrapper[4948]: I0120 20:04:43.102173 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-8lchs"] Jan 20 20:04:43 crc kubenswrapper[4948]: I0120 20:04:43.611890 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:43 crc kubenswrapper[4948]: I0120 20:04:43.669539 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:43 crc kubenswrapper[4948]: I0120 20:04:43.685067 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:43 crc kubenswrapper[4948]: I0120 20:04:43.741246 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 20 
20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.029939 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8lchs" event={"ID":"acd6e216-4534-4c7a-ab49-94213536db2c","Type":"ContainerStarted","Data":"fe77cc93577f6f2e5cf5e29437b5b5d2a9d3b82677502716ff829fd93a0bf771"} Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.029996 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8lchs" event={"ID":"acd6e216-4534-4c7a-ab49-94213536db2c","Type":"ContainerStarted","Data":"c0de38b251c9268644a376bcbac49f5c8cfb3f74eb34701b1f83cb946c57d55f"} Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.030568 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.031068 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.137755 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-8lchs" podStartSLOduration=2.137734767 podStartE2EDuration="2.137734767s" podCreationTimestamp="2026-01-20 20:04:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:04:44.132172549 +0000 UTC m=+912.082897518" watchObservedRunningTime="2026-01-20 20:04:44.137734767 +0000 UTC m=+912.088459736" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.151720 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.152121 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.676684 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.679007 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.684326 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.684561 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.685739 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-4mczw" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.685887 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.712240 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.852299 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8beae232-ff35-4a9c-9f68-0d9c20e65c67-config\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.852354 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqwpj\" (UniqueName: \"kubernetes.io/projected/8beae232-ff35-4a9c-9f68-0d9c20e65c67-kube-api-access-vqwpj\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.852534 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.852683 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8beae232-ff35-4a9c-9f68-0d9c20e65c67-scripts\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.852786 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8beae232-ff35-4a9c-9f68-0d9c20e65c67-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.852820 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.852863 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: 
I0120 20:04:44.954747 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-wfsm8"] Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.955425 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.955498 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8beae232-ff35-4a9c-9f68-0d9c20e65c67-scripts\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.955538 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8beae232-ff35-4a9c-9f68-0d9c20e65c67-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.955562 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.955597 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.955632 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8beae232-ff35-4a9c-9f68-0d9c20e65c67-config\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.955662 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqwpj\" (UniqueName: \"kubernetes.io/projected/8beae232-ff35-4a9c-9f68-0d9c20e65c67-kube-api-access-vqwpj\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.956076 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.956391 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8beae232-ff35-4a9c-9f68-0d9c20e65c67-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.957607 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8beae232-ff35-4a9c-9f68-0d9c20e65c67-config\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:44 crc kubenswrapper[4948]: I0120 20:04:44.957771 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8beae232-ff35-4a9c-9f68-0d9c20e65c67-scripts\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.057238 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e7c10dc-5215-41dc-80b4-00bc47be99e8-operator-scripts\") pod \"keystone-db-create-wfsm8\" (UID: \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\") " pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.057276 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chstt\" (UniqueName: \"kubernetes.io/projected/8e7c10dc-5215-41dc-80b4-00bc47be99e8-kube-api-access-chstt\") pod \"keystone-db-create-wfsm8\" (UID: \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\") " pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.057560 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.067102 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.067580 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/8beae232-ff35-4a9c-9f68-0d9c20e65c67-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.076438 4948 generic.go:334] "Generic (PLEG): container finished" podID="acd6e216-4534-4c7a-ab49-94213536db2c" containerID="fe77cc93577f6f2e5cf5e29437b5b5d2a9d3b82677502716ff829fd93a0bf771" exitCode=0 Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.077398 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8lchs" event={"ID":"acd6e216-4534-4c7a-ab49-94213536db2c","Type":"ContainerDied","Data":"fe77cc93577f6f2e5cf5e29437b5b5d2a9d3b82677502716ff829fd93a0bf771"} Jan 20 
20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.138932 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-wfsm8"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.156566 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqwpj\" (UniqueName: \"kubernetes.io/projected/8beae232-ff35-4a9c-9f68-0d9c20e65c67-kube-api-access-vqwpj\") pod \"ovn-northd-0\" (UID: \"8beae232-ff35-4a9c-9f68-0d9c20e65c67\") " pod="openstack/ovn-northd-0" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.159489 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e7c10dc-5215-41dc-80b4-00bc47be99e8-operator-scripts\") pod \"keystone-db-create-wfsm8\" (UID: \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\") " pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.159519 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chstt\" (UniqueName: \"kubernetes.io/projected/8e7c10dc-5215-41dc-80b4-00bc47be99e8-kube-api-access-chstt\") pod \"keystone-db-create-wfsm8\" (UID: \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\") " pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.165838 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e7c10dc-5215-41dc-80b4-00bc47be99e8-operator-scripts\") pod \"keystone-db-create-wfsm8\" (UID: \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\") " pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.200272 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chstt\" (UniqueName: \"kubernetes.io/projected/8e7c10dc-5215-41dc-80b4-00bc47be99e8-kube-api-access-chstt\") pod \"keystone-db-create-wfsm8\" (UID: \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\") " pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.264219 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-b435-account-create-update-fcfpr"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.265323 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b435-account-create-update-fcfpr" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.267921 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.277883 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b435-account-create-update-fcfpr"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.306717 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.367616 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-operator-scripts\") pod \"keystone-b435-account-create-update-fcfpr\" (UID: \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\") " pod="openstack/keystone-b435-account-create-update-fcfpr" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.367755 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpjjr\" (UniqueName: \"kubernetes.io/projected/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-kube-api-access-rpjjr\") pod \"keystone-b435-account-create-update-fcfpr\" (UID: \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\") " pod="openstack/keystone-b435-account-create-update-fcfpr" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.423118 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.468922 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpjjr\" (UniqueName: \"kubernetes.io/projected/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-kube-api-access-rpjjr\") pod \"keystone-b435-account-create-update-fcfpr\" (UID: \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\") " pod="openstack/keystone-b435-account-create-update-fcfpr" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.469234 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-operator-scripts\") pod \"keystone-b435-account-create-update-fcfpr\" (UID: \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\") " pod="openstack/keystone-b435-account-create-update-fcfpr" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.470049 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-operator-scripts\") pod \"keystone-b435-account-create-update-fcfpr\" (UID: \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\") " pod="openstack/keystone-b435-account-create-update-fcfpr" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.500258 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-dz2hg"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.501407 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.509414 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpjjr\" (UniqueName: \"kubernetes.io/projected/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-kube-api-access-rpjjr\") pod \"keystone-b435-account-create-update-fcfpr\" (UID: \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\") " pod="openstack/keystone-b435-account-create-update-fcfpr" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.515210 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-dz2hg"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.622445 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-operator-scripts\") pod \"placement-db-create-dz2hg\" (UID: \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\") " pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.622505 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9bzk\" (UniqueName: \"kubernetes.io/projected/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-kube-api-access-j9bzk\") pod \"placement-db-create-dz2hg\" (UID: \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\") " pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.623188 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b435-account-create-update-fcfpr" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.624933 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.732011 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-operator-scripts\") pod \"placement-db-create-dz2hg\" (UID: \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\") " pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.734155 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9bzk\" (UniqueName: \"kubernetes.io/projected/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-kube-api-access-j9bzk\") pod \"placement-db-create-dz2hg\" (UID: \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\") " pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.734468 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-operator-scripts\") pod \"placement-db-create-dz2hg\" (UID: \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\") " pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.788202 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9bzk\" (UniqueName: \"kubernetes.io/projected/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-kube-api-access-j9bzk\") pod \"placement-db-create-dz2hg\" (UID: \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\") " pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.792347 4948 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/placement-4a12-account-create-update-l49lt"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.793485 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.798166 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.822342 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-4a12-account-create-update-l49lt"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.837829 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.869160 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-k8npv"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.870248 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-k8npv" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.879288 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-k8npv"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.936395 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.938485 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-png28\" (UniqueName: \"kubernetes.io/projected/0d2ae321-a5cb-4018-8899-7de265e16c0f-kube-api-access-png28\") pod \"placement-4a12-account-create-update-l49lt\" (UID: \"0d2ae321-a5cb-4018-8899-7de265e16c0f\") " pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.938592 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2ae321-a5cb-4018-8899-7de265e16c0f-operator-scripts\") pod \"placement-4a12-account-create-update-l49lt\" (UID: \"0d2ae321-a5cb-4018-8899-7de265e16c0f\") " pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.975314 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-1cf5-account-create-update-tjktc"] Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.976649 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.978827 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 20 20:04:45 crc kubenswrapper[4948]: I0120 20:04:45.991483 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1cf5-account-create-update-tjktc"] Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.040507 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dql6\" (UniqueName: \"kubernetes.io/projected/dc011d48-6711-420d-911f-ffda06687982-kube-api-access-8dql6\") pod \"glance-1cf5-account-create-update-tjktc\" (UID: \"dc011d48-6711-420d-911f-ffda06687982\") " pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.040766 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-png28\" (UniqueName: \"kubernetes.io/projected/0d2ae321-a5cb-4018-8899-7de265e16c0f-kube-api-access-png28\") pod \"placement-4a12-account-create-update-l49lt\" (UID: \"0d2ae321-a5cb-4018-8899-7de265e16c0f\") " pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.040914 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc011d48-6711-420d-911f-ffda06687982-operator-scripts\") pod \"glance-1cf5-account-create-update-tjktc\" (UID: \"dc011d48-6711-420d-911f-ffda06687982\") " pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.041016 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2ae321-a5cb-4018-8899-7de265e16c0f-operator-scripts\") pod \"placement-4a12-account-create-update-l49lt\" (UID: \"0d2ae321-a5cb-4018-8899-7de265e16c0f\") " pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.041134 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmdvj\" (UniqueName: \"kubernetes.io/projected/c3cfb075-5fb9-4769-be33-338ef93623d2-kube-api-access-cmdvj\") pod \"glance-db-create-k8npv\" (UID: \"c3cfb075-5fb9-4769-be33-338ef93623d2\") " pod="openstack/glance-db-create-k8npv" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.041221 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3cfb075-5fb9-4769-be33-338ef93623d2-operator-scripts\") pod \"glance-db-create-k8npv\" (UID: \"c3cfb075-5fb9-4769-be33-338ef93623d2\") " pod="openstack/glance-db-create-k8npv" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.042209 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2ae321-a5cb-4018-8899-7de265e16c0f-operator-scripts\") pod \"placement-4a12-account-create-update-l49lt\" (UID: \"0d2ae321-a5cb-4018-8899-7de265e16c0f\") " pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.063452 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-png28\" (UniqueName: 
\"kubernetes.io/projected/0d2ae321-a5cb-4018-8899-7de265e16c0f-kube-api-access-png28\") pod \"placement-4a12-account-create-update-l49lt\" (UID: \"0d2ae321-a5cb-4018-8899-7de265e16c0f\") " pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.174135 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmdvj\" (UniqueName: \"kubernetes.io/projected/c3cfb075-5fb9-4769-be33-338ef93623d2-kube-api-access-cmdvj\") pod \"glance-db-create-k8npv\" (UID: \"c3cfb075-5fb9-4769-be33-338ef93623d2\") " pod="openstack/glance-db-create-k8npv" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.174180 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3cfb075-5fb9-4769-be33-338ef93623d2-operator-scripts\") pod \"glance-db-create-k8npv\" (UID: \"c3cfb075-5fb9-4769-be33-338ef93623d2\") " pod="openstack/glance-db-create-k8npv" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.174232 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dql6\" (UniqueName: \"kubernetes.io/projected/dc011d48-6711-420d-911f-ffda06687982-kube-api-access-8dql6\") pod \"glance-1cf5-account-create-update-tjktc\" (UID: \"dc011d48-6711-420d-911f-ffda06687982\") " pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.174322 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc011d48-6711-420d-911f-ffda06687982-operator-scripts\") pod \"glance-1cf5-account-create-update-tjktc\" (UID: \"dc011d48-6711-420d-911f-ffda06687982\") " pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.174583 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.175057 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3cfb075-5fb9-4769-be33-338ef93623d2-operator-scripts\") pod \"glance-db-create-k8npv\" (UID: \"c3cfb075-5fb9-4769-be33-338ef93623d2\") " pod="openstack/glance-db-create-k8npv" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.175379 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc011d48-6711-420d-911f-ffda06687982-operator-scripts\") pod \"glance-1cf5-account-create-update-tjktc\" (UID: \"dc011d48-6711-420d-911f-ffda06687982\") " pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.202212 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dql6\" (UniqueName: \"kubernetes.io/projected/dc011d48-6711-420d-911f-ffda06687982-kube-api-access-8dql6\") pod \"glance-1cf5-account-create-update-tjktc\" (UID: \"dc011d48-6711-420d-911f-ffda06687982\") " pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.205855 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmdvj\" (UniqueName: \"kubernetes.io/projected/c3cfb075-5fb9-4769-be33-338ef93623d2-kube-api-access-cmdvj\") pod \"glance-db-create-k8npv\" (UID: \"c3cfb075-5fb9-4769-be33-338ef93623d2\") " pod="openstack/glance-db-create-k8npv" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.329892 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:46 crc kubenswrapper[4948]: I0120 20:04:46.486895 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-k8npv" Jan 20 20:04:47 crc kubenswrapper[4948]: I0120 20:04:47.139883 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:47 crc kubenswrapper[4948]: I0120 20:04:47.267325 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-vw2t4"] Jan 20 20:04:47 crc kubenswrapper[4948]: I0120 20:04:47.267603 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" podUID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" containerName="dnsmasq-dns" containerID="cri-o://88a329c47bc849d9d81ee64dc2e15e150fd046950685f7f623a9a05450901737" gracePeriod=10 Jan 20 20:04:47 crc kubenswrapper[4948]: I0120 20:04:47.272996 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.142500 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-s9krd"] Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.144093 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.179580 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-s9krd"] Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.184054 4948 generic.go:334] "Generic (PLEG): container finished" podID="896974b3-7b54-41b4-985e-9bfa9849f260" containerID="c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49" exitCode=0 Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.184179 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8b7f" event={"ID":"896974b3-7b54-41b4-985e-9bfa9849f260","Type":"ContainerDied","Data":"c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49"} Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.195905 4948 generic.go:334] "Generic (PLEG): container finished" podID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" containerID="88a329c47bc849d9d81ee64dc2e15e150fd046950685f7f623a9a05450901737" exitCode=0 Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.195941 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" event={"ID":"7c115fd8-7c9c-49b9-abbb-295caa3a90e4","Type":"ContainerDied","Data":"88a329c47bc849d9d81ee64dc2e15e150fd046950685f7f623a9a05450901737"} Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.278593 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-8lchs" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.340510 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-config\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.340606 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.340835 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-dns-svc\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.340878 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.340960 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r64vw\" (UniqueName: \"kubernetes.io/projected/6a31f534-f99e-4471-a17f-4630288d7353-kube-api-access-r64vw\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " 
pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.443303 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acd6e216-4534-4c7a-ab49-94213536db2c-operator-scripts\") pod \"acd6e216-4534-4c7a-ab49-94213536db2c\" (UID: \"acd6e216-4534-4c7a-ab49-94213536db2c\") " Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.443825 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rg2z\" (UniqueName: \"kubernetes.io/projected/acd6e216-4534-4c7a-ab49-94213536db2c-kube-api-access-4rg2z\") pod \"acd6e216-4534-4c7a-ab49-94213536db2c\" (UID: \"acd6e216-4534-4c7a-ab49-94213536db2c\") " Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.444541 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r64vw\" (UniqueName: \"kubernetes.io/projected/6a31f534-f99e-4471-a17f-4630288d7353-kube-api-access-r64vw\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.444543 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/acd6e216-4534-4c7a-ab49-94213536db2c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "acd6e216-4534-4c7a-ab49-94213536db2c" (UID: "acd6e216-4534-4c7a-ab49-94213536db2c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.444726 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-config\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.444824 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.444909 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-dns-svc\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.444956 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.445133 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/acd6e216-4534-4c7a-ab49-94213536db2c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.446107 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.446991 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-dns-svc\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.447504 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-config\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.459695 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.471316 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acd6e216-4534-4c7a-ab49-94213536db2c-kube-api-access-4rg2z" (OuterVolumeSpecName: "kube-api-access-4rg2z") pod "acd6e216-4534-4c7a-ab49-94213536db2c" (UID: "acd6e216-4534-4c7a-ab49-94213536db2c"). InnerVolumeSpecName "kube-api-access-4rg2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.475647 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r64vw\" (UniqueName: \"kubernetes.io/projected/6a31f534-f99e-4471-a17f-4630288d7353-kube-api-access-r64vw\") pod \"dnsmasq-dns-698758b865-s9krd\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") " pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.546955 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rg2z\" (UniqueName: \"kubernetes.io/projected/acd6e216-4534-4c7a-ab49-94213536db2c-kube-api-access-4rg2z\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.556010 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.767058 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.856634 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-ovsdbserver-sb\") pod \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.856691 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-dns-svc\") pod \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.856776 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-config\") pod \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.856904 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tb8mn\" (UniqueName: \"kubernetes.io/projected/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-kube-api-access-tb8mn\") pod \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\" (UID: \"7c115fd8-7c9c-49b9-abbb-295caa3a90e4\") " Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.902529 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-kube-api-access-tb8mn" (OuterVolumeSpecName: "kube-api-access-tb8mn") pod "7c115fd8-7c9c-49b9-abbb-295caa3a90e4" (UID: "7c115fd8-7c9c-49b9-abbb-295caa3a90e4"). InnerVolumeSpecName "kube-api-access-tb8mn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.962649 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tb8mn\" (UniqueName: \"kubernetes.io/projected/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-kube-api-access-tb8mn\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:48 crc kubenswrapper[4948]: I0120 20:04:48.964010 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7c115fd8-7c9c-49b9-abbb-295caa3a90e4" (UID: "7c115fd8-7c9c-49b9-abbb-295caa3a90e4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.049464 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-config" (OuterVolumeSpecName: "config") pod "7c115fd8-7c9c-49b9-abbb-295caa3a90e4" (UID: "7c115fd8-7c9c-49b9-abbb-295caa3a90e4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.059426 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7c115fd8-7c9c-49b9-abbb-295caa3a90e4" (UID: "7c115fd8-7c9c-49b9-abbb-295caa3a90e4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.063933 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.063970 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.063983 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c115fd8-7c9c-49b9-abbb-295caa3a90e4-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.215581 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-4a12-account-create-update-l49lt"] Jan 20 20:04:49 crc kubenswrapper[4948]: W0120 20:04:49.229504 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d2ae321_a5cb_4018_8899_7de265e16c0f.slice/crio-df16ae1c74ddb9ed736cbe952f4810536ecbb838b0b8e8abc09954702716acd7 WatchSource:0}: Error finding container df16ae1c74ddb9ed736cbe952f4810536ecbb838b0b8e8abc09954702716acd7: Status 404 returned error can't find the container with id df16ae1c74ddb9ed736cbe952f4810536ecbb838b0b8e8abc09954702716acd7 Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.239854 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8b7f" event={"ID":"896974b3-7b54-41b4-985e-9bfa9849f260","Type":"ContainerStarted","Data":"fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4"} Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.249133 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.249821 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-vw2t4" event={"ID":"7c115fd8-7c9c-49b9-abbb-295caa3a90e4","Type":"ContainerDied","Data":"466fe68d07e7193f6506ad2fd6e46973bd2410c34ac14fef8a82d5d9c7b6ae09"} Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.249882 4948 scope.go:117] "RemoveContainer" containerID="88a329c47bc849d9d81ee64dc2e15e150fd046950685f7f623a9a05450901737" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.261006 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8beae232-ff35-4a9c-9f68-0d9c20e65c67","Type":"ContainerStarted","Data":"3c8c3f9b4c470a151c71bfe1761ecd727389d37123cbe9fd6e532941efbca9b8"} Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.266563 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-8lchs" event={"ID":"acd6e216-4534-4c7a-ab49-94213536db2c","Type":"ContainerDied","Data":"c0de38b251c9268644a376bcbac49f5c8cfb3f74eb34701b1f83cb946c57d55f"} Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.266600 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0de38b251c9268644a376bcbac49f5c8cfb3f74eb34701b1f83cb946c57d55f" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.268183 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-8lchs" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.276353 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-p8b7f" podStartSLOduration=7.446622859 podStartE2EDuration="15.276329876s" podCreationTimestamp="2026-01-20 20:04:34 +0000 UTC" firstStartedPulling="2026-01-20 20:04:40.996403323 +0000 UTC m=+908.947128292" lastFinishedPulling="2026-01-20 20:04:48.82611034 +0000 UTC m=+916.776835309" observedRunningTime="2026-01-20 20:04:49.268477913 +0000 UTC m=+917.219202872" watchObservedRunningTime="2026-01-20 20:04:49.276329876 +0000 UTC m=+917.227054845" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.328418 4948 scope.go:117] "RemoveContainer" containerID="93927fc8df332fcc65c30ab9717117a81426e98decb20d77d75bc00035db8d96" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.356802 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 20 20:04:49 crc kubenswrapper[4948]: E0120 20:04:49.357280 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acd6e216-4534-4c7a-ab49-94213536db2c" containerName="mariadb-account-create-update" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.357299 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="acd6e216-4534-4c7a-ab49-94213536db2c" containerName="mariadb-account-create-update" Jan 20 20:04:49 crc kubenswrapper[4948]: E0120 20:04:49.357331 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" containerName="dnsmasq-dns" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.357338 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" containerName="dnsmasq-dns" Jan 20 20:04:49 crc kubenswrapper[4948]: E0120 20:04:49.357354 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" containerName="init" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.357362 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" containerName="init" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.357551 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="acd6e216-4534-4c7a-ab49-94213536db2c" containerName="mariadb-account-create-update" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.357579 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" containerName="dnsmasq-dns" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.366184 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-vw2t4"] Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.366323 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.374218 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-vw2t4"] Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.375012 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-zcwdb" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.390026 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.392240 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.392261 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.392668 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.500775 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253a8193-904e-4f62-adbe-597b97b4fd30-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.501148 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.501180 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.501232 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzxsb\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-kube-api-access-gzxsb\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.501281 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/253a8193-904e-4f62-adbe-597b97b4fd30-cache\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.501297 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/253a8193-904e-4f62-adbe-597b97b4fd30-lock\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.522986 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-wfsm8"] Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.537292 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/glance-db-create-k8npv"] Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.580691 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b435-account-create-update-fcfpr"] Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.603123 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/253a8193-904e-4f62-adbe-597b97b4fd30-cache\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.603194 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/253a8193-904e-4f62-adbe-597b97b4fd30-lock\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.603368 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253a8193-904e-4f62-adbe-597b97b4fd30-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.603396 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.603443 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.603509 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzxsb\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-kube-api-access-gzxsb\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.605201 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.608300 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/253a8193-904e-4f62-adbe-597b97b4fd30-lock\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.608583 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/253a8193-904e-4f62-adbe-597b97b4fd30-cache\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: E0120 20:04:49.609117 4948 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap 
"swift-ring-files" not found Jan 20 20:04:49 crc kubenswrapper[4948]: E0120 20:04:49.609686 4948 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 20:04:49 crc kubenswrapper[4948]: E0120 20:04:49.610180 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift podName:253a8193-904e-4f62-adbe-597b97b4fd30 nodeName:}" failed. No retries permitted until 2026-01-20 20:04:50.110153282 +0000 UTC m=+918.060878251 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift") pod "swift-storage-0" (UID: "253a8193-904e-4f62-adbe-597b97b4fd30") : configmap "swift-ring-files" not found Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.616993 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253a8193-904e-4f62-adbe-597b97b4fd30-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.619308 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1cf5-account-create-update-tjktc"] Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.632234 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzxsb\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-kube-api-access-gzxsb\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.668973 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.682665 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-dz2hg"] Jan 20 20:04:49 crc kubenswrapper[4948]: I0120 20:04:49.839524 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-s9krd"] Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.111506 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:50 crc kubenswrapper[4948]: E0120 20:04:50.111721 4948 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 20:04:50 crc kubenswrapper[4948]: E0120 20:04:50.111744 4948 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 20:04:50 crc kubenswrapper[4948]: E0120 20:04:50.111799 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift podName:253a8193-904e-4f62-adbe-597b97b4fd30 nodeName:}" failed. No retries permitted until 2026-01-20 20:04:51.111780666 +0000 UTC m=+919.062505635 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift") pod "swift-storage-0" (UID: "253a8193-904e-4f62-adbe-597b97b4fd30") : configmap "swift-ring-files" not found
Jan 20 20:04:50 crc kubenswrapper[4948]: W0120 20:04:50.232959 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a31f534_f99e_4471_a17f_4630288d7353.slice/crio-891a6bfe2dbdf40e170ff948217ed9033207f2476224f6e4044bee867744df2c WatchSource:0}: Error finding container 891a6bfe2dbdf40e170ff948217ed9033207f2476224f6e4044bee867744df2c: Status 404 returned error can't find the container with id 891a6bfe2dbdf40e170ff948217ed9033207f2476224f6e4044bee867744df2c
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.255009 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.255087 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.276222 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1cf5-account-create-update-tjktc" event={"ID":"dc011d48-6711-420d-911f-ffda06687982","Type":"ContainerStarted","Data":"c08bf59aa432172275d57df3a0d4fa22e84b3c6123fda5eeabb1819c5ce62f45"}
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.287925 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wfsm8" event={"ID":"8e7c10dc-5215-41dc-80b4-00bc47be99e8","Type":"ContainerStarted","Data":"98f9d24b32b4b3e1fef828963fb3e97a22e49aa3fb820e8156929fa290b29132"}
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.324633 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4a12-account-create-update-l49lt" event={"ID":"0d2ae321-a5cb-4018-8899-7de265e16c0f","Type":"ContainerStarted","Data":"c4c10f262615f33b3d0f2b4f178201c8c68bd21518766373085d4d53523b1eae"}
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.324688 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4a12-account-create-update-l49lt" event={"ID":"0d2ae321-a5cb-4018-8899-7de265e16c0f","Type":"ContainerStarted","Data":"df16ae1c74ddb9ed736cbe952f4810536ecbb838b0b8e8abc09954702716acd7"}
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.351053 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dz2hg" event={"ID":"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe","Type":"ContainerStarted","Data":"320c4c4a950f10525900bd9fc336ca7ad418222e5db5eb49add79e4176ff150e"}
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.360671 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-s9krd" event={"ID":"6a31f534-f99e-4471-a17f-4630288d7353","Type":"ContainerStarted","Data":"891a6bfe2dbdf40e170ff948217ed9033207f2476224f6e4044bee867744df2c"}
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.362846 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-k8npv" event={"ID":"c3cfb075-5fb9-4769-be33-338ef93623d2","Type":"ContainerStarted","Data":"8f9238a3aa7cb710f6e8e3b1b4e5d29b7816df1427632a8b35552d16ea07d478"}
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.377068 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b435-account-create-update-fcfpr" event={"ID":"86e10f1b-6bf7-4a69-b49d-b360c73a5a65","Type":"ContainerStarted","Data":"ca0dd00b153b26e6b91611cf7287124304bf924d7d46fc4970f0baf2bf184a69"}
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.421788 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-4a12-account-create-update-l49lt" podStartSLOduration=5.421762576 podStartE2EDuration="5.421762576s" podCreationTimestamp="2026-01-20 20:04:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:04:50.394949876 +0000 UTC m=+918.345674865" watchObservedRunningTime="2026-01-20 20:04:50.421762576 +0000 UTC m=+918.372487545"
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.423715 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-k8npv" podStartSLOduration=5.423695321 podStartE2EDuration="5.423695321s" podCreationTimestamp="2026-01-20 20:04:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:04:50.412630047 +0000 UTC m=+918.363355016" watchObservedRunningTime="2026-01-20 20:04:50.423695321 +0000 UTC m=+918.374420290"
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.435694 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-b435-account-create-update-fcfpr" podStartSLOduration=5.435673441 podStartE2EDuration="5.435673441s" podCreationTimestamp="2026-01-20 20:04:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:04:50.42859503 +0000 UTC m=+918.379319999" watchObservedRunningTime="2026-01-20 20:04:50.435673441 +0000 UTC m=+918.386398410"
Jan 20 20:04:50 crc kubenswrapper[4948]: I0120 20:04:50.583421 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c115fd8-7c9c-49b9-abbb-295caa3a90e4" path="/var/lib/kubelet/pods/7c115fd8-7c9c-49b9-abbb-295caa3a90e4/volumes"
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.139056 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0"
Jan 20 20:04:51 crc kubenswrapper[4948]: E0120 20:04:51.139330 4948 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 20 20:04:51 crc kubenswrapper[4948]: E0120 20:04:51.139350 4948 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 20 20:04:51 crc kubenswrapper[4948]: E0120 20:04:51.139393 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift podName:253a8193-904e-4f62-adbe-597b97b4fd30 nodeName:}" failed. No retries permitted until 2026-01-20 20:04:53.139378665 +0000 UTC m=+921.090103634 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift") pod "swift-storage-0" (UID: "253a8193-904e-4f62-adbe-597b97b4fd30") : configmap "swift-ring-files" not found
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.386749 4948 generic.go:334] "Generic (PLEG): container finished" podID="c3cfb075-5fb9-4769-be33-338ef93623d2" containerID="4d3fb988a1876ed7e13f28cc46ea16777ee911a7ddbf2a6c6561560b10a2a2d7" exitCode=0
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.386831 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-k8npv" event={"ID":"c3cfb075-5fb9-4769-be33-338ef93623d2","Type":"ContainerDied","Data":"4d3fb988a1876ed7e13f28cc46ea16777ee911a7ddbf2a6c6561560b10a2a2d7"}
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.388946 4948 generic.go:334] "Generic (PLEG): container finished" podID="86e10f1b-6bf7-4a69-b49d-b360c73a5a65" containerID="11e35f9e35e38f3774a9245fea8df92163ef58a8b0cee8e17f3e329a11eee9a4" exitCode=0
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.388987 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b435-account-create-update-fcfpr" event={"ID":"86e10f1b-6bf7-4a69-b49d-b360c73a5a65","Type":"ContainerDied","Data":"11e35f9e35e38f3774a9245fea8df92163ef58a8b0cee8e17f3e329a11eee9a4"}
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.390927 4948 generic.go:334] "Generic (PLEG): container finished" podID="dc011d48-6711-420d-911f-ffda06687982" containerID="56cf946b72fd6400f6553e68ff608fc33e326132899c51983ea7068ac01c3a45" exitCode=0
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.391009 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1cf5-account-create-update-tjktc" event={"ID":"dc011d48-6711-420d-911f-ffda06687982","Type":"ContainerDied","Data":"56cf946b72fd6400f6553e68ff608fc33e326132899c51983ea7068ac01c3a45"}
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.392597 4948 generic.go:334] "Generic (PLEG): container finished" podID="8e7c10dc-5215-41dc-80b4-00bc47be99e8" containerID="eb6af1732ec62a3656f727a9805834f662bb4918873f2b6262147d59f1b9daec" exitCode=0
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.392625 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wfsm8" event={"ID":"8e7c10dc-5215-41dc-80b4-00bc47be99e8","Type":"ContainerDied","Data":"eb6af1732ec62a3656f727a9805834f662bb4918873f2b6262147d59f1b9daec"}
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.394343 4948 generic.go:334] "Generic (PLEG): container finished" podID="0d2ae321-a5cb-4018-8899-7de265e16c0f" containerID="c4c10f262615f33b3d0f2b4f178201c8c68bd21518766373085d4d53523b1eae" exitCode=0
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.394438 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4a12-account-create-update-l49lt" event={"ID":"0d2ae321-a5cb-4018-8899-7de265e16c0f","Type":"ContainerDied","Data":"c4c10f262615f33b3d0f2b4f178201c8c68bd21518766373085d4d53523b1eae"}
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.396463 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8beae232-ff35-4a9c-9f68-0d9c20e65c67","Type":"ContainerStarted","Data":"96541ed11dcd8503465e47c5a602a7de347b4cd6e4103ed09550be033652b4d8"}
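The mount errors above recur because swift-storage-0's projected "etc-swift" volume references a ConfigMap, openstack/swift-ring-files, that does not exist yet; the swift-ring-rebalance job that appears further down is presumably what publishes it. A minimal client-go sketch of the same wait-for-ConfigMap check (namespace and object names taken from the log; an illustrative diagnostic, not kubelet code):

    package main

    import (
        "context"
        "fmt"
        "time"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)

        // Poll every 2s, give up after 5m: roughly what the kubelet is doing
        // implicitly while it retries MountVolume.SetUp for this volume.
        err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 5*time.Minute, true,
            func(ctx context.Context) (bool, error) {
                _, err := client.CoreV1().ConfigMaps("openstack").Get(ctx, "swift-ring-files", metav1.GetOptions{})
                if apierrors.IsNotFound(err) {
                    return false, nil // keep waiting; the rebalance job has not published it yet
                }
                return err == nil, err
            })
        fmt.Println("swift-ring-files present:", err == nil)
    }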
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.396506 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"8beae232-ff35-4a9c-9f68-0d9c20e65c67","Type":"ContainerStarted","Data":"665a108ff114a0d56fd6b3de87137c4a1c6d3d5aac593db2c7ae8b9b254252bd"}
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.396556 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.397951 4948 generic.go:334] "Generic (PLEG): container finished" podID="4ce6b227-ed6f-44d8-b9d1-e906bd3457fe" containerID="c377324355f9239526d0e3fff649587a9f90f4a2f61c332105da841c2a05a87a" exitCode=0
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.397982 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dz2hg" event={"ID":"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe","Type":"ContainerDied","Data":"c377324355f9239526d0e3fff649587a9f90f4a2f61c332105da841c2a05a87a"}
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.399533 4948 generic.go:334] "Generic (PLEG): container finished" podID="6a31f534-f99e-4471-a17f-4630288d7353" containerID="27137d022dd88abfc6ff794f1a1c3042741eab6ed11987f0c2beb7e54518d22b" exitCode=0
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.399567 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-s9krd" event={"ID":"6a31f534-f99e-4471-a17f-4630288d7353","Type":"ContainerDied","Data":"27137d022dd88abfc6ff794f1a1c3042741eab6ed11987f0c2beb7e54518d22b"}
Jan 20 20:04:51 crc kubenswrapper[4948]: I0120 20:04:51.509056 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=5.395306938 podStartE2EDuration="7.509010585s" podCreationTimestamp="2026-01-20 20:04:44 +0000 UTC" firstStartedPulling="2026-01-20 20:04:48.180591945 +0000 UTC m=+916.131316914" lastFinishedPulling="2026-01-20 20:04:50.294295592 +0000 UTC m=+918.245020561" observedRunningTime="2026-01-20 20:04:51.50531368 +0000 UTC m=+919.456038649" watchObservedRunningTime="2026-01-20 20:04:51.509010585 +0000 UTC m=+919.459735554"
Jan 20 20:04:52 crc kubenswrapper[4948]: I0120 20:04:52.409414 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-s9krd" event={"ID":"6a31f534-f99e-4471-a17f-4630288d7353","Type":"ContainerStarted","Data":"10c220feebb03a65e036f269bbe8754201aacf46d58778445755d547aafd1795"}
Jan 20 20:04:52 crc kubenswrapper[4948]: I0120 20:04:52.447304 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-s9krd" podStartSLOduration=4.447279591 podStartE2EDuration="4.447279591s" podCreationTimestamp="2026-01-20 20:04:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:04:52.431267627 +0000 UTC m=+920.381992616" watchObservedRunningTime="2026-01-20 20:04:52.447279591 +0000 UTC m=+920.398004560"
Jan 20 20:04:52 crc kubenswrapper[4948]: I0120 20:04:52.862149 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4a12-account-create-update-l49lt"
Jan 20 20:04:52 crc kubenswrapper[4948]: I0120 20:04:52.971808 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2ae321-a5cb-4018-8899-7de265e16c0f-operator-scripts\") pod \"0d2ae321-a5cb-4018-8899-7de265e16c0f\" (UID: \"0d2ae321-a5cb-4018-8899-7de265e16c0f\") "
Jan 20 20:04:52 crc kubenswrapper[4948]: I0120 20:04:52.972257 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-png28\" (UniqueName: \"kubernetes.io/projected/0d2ae321-a5cb-4018-8899-7de265e16c0f-kube-api-access-png28\") pod \"0d2ae321-a5cb-4018-8899-7de265e16c0f\" (UID: \"0d2ae321-a5cb-4018-8899-7de265e16c0f\") "
Jan 20 20:04:52 crc kubenswrapper[4948]: I0120 20:04:52.972294 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d2ae321-a5cb-4018-8899-7de265e16c0f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0d2ae321-a5cb-4018-8899-7de265e16c0f" (UID: "0d2ae321-a5cb-4018-8899-7de265e16c0f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:52 crc kubenswrapper[4948]: I0120 20:04:52.997141 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d2ae321-a5cb-4018-8899-7de265e16c0f-kube-api-access-png28" (OuterVolumeSpecName: "kube-api-access-png28") pod "0d2ae321-a5cb-4018-8899-7de265e16c0f" (UID: "0d2ae321-a5cb-4018-8899-7de265e16c0f"). InnerVolumeSpecName "kube-api-access-png28". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.074938 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d2ae321-a5cb-4018-8899-7de265e16c0f-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.074968 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-png28\" (UniqueName: \"kubernetes.io/projected/0d2ae321-a5cb-4018-8899-7de265e16c0f-kube-api-access-png28\") on node \"crc\" DevicePath \"\""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.180275 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0"
Jan 20 20:04:53 crc kubenswrapper[4948]: E0120 20:04:53.180549 4948 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 20 20:04:53 crc kubenswrapper[4948]: E0120 20:04:53.180578 4948 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 20 20:04:53 crc kubenswrapper[4948]: E0120 20:04:53.180645 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift podName:253a8193-904e-4f62-adbe-597b97b4fd30 nodeName:}" failed. No retries permitted until 2026-01-20 20:04:57.180623275 +0000 UTC m=+925.131348234 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift") pod "swift-storage-0" (UID: "253a8193-904e-4f62-adbe-597b97b4fd30") : configmap "swift-ring-files" not found
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.253038 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b435-account-create-update-fcfpr"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.282930 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-dz2hg"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.289146 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wfsm8"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306084 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-ctgvx"]
Jan 20 20:04:53 crc kubenswrapper[4948]: E0120 20:04:53.306436 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d2ae321-a5cb-4018-8899-7de265e16c0f" containerName="mariadb-account-create-update"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306448 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d2ae321-a5cb-4018-8899-7de265e16c0f" containerName="mariadb-account-create-update"
Jan 20 20:04:53 crc kubenswrapper[4948]: E0120 20:04:53.306472 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ce6b227-ed6f-44d8-b9d1-e906bd3457fe" containerName="mariadb-database-create"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306478 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ce6b227-ed6f-44d8-b9d1-e906bd3457fe" containerName="mariadb-database-create"
Jan 20 20:04:53 crc kubenswrapper[4948]: E0120 20:04:53.306492 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e7c10dc-5215-41dc-80b4-00bc47be99e8" containerName="mariadb-database-create"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306498 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e7c10dc-5215-41dc-80b4-00bc47be99e8" containerName="mariadb-database-create"
Jan 20 20:04:53 crc kubenswrapper[4948]: E0120 20:04:53.306514 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86e10f1b-6bf7-4a69-b49d-b360c73a5a65" containerName="mariadb-account-create-update"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306519 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="86e10f1b-6bf7-4a69-b49d-b360c73a5a65" containerName="mariadb-account-create-update"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306675 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d2ae321-a5cb-4018-8899-7de265e16c0f" containerName="mariadb-account-create-update"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306684 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ce6b227-ed6f-44d8-b9d1-e906bd3457fe" containerName="mariadb-database-create"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306712 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e7c10dc-5215-41dc-80b4-00bc47be99e8" containerName="mariadb-database-create"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.306722 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="86e10f1b-6bf7-4a69-b49d-b360c73a5a65" containerName="mariadb-account-create-update"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.307230 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-ctgvx"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.311739 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.311928 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.312802 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.316663 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-ctgvx"]
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.319953 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1cf5-account-create-update-tjktc"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.321331 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-k8npv"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.409557 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpjjr\" (UniqueName: \"kubernetes.io/projected/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-kube-api-access-rpjjr\") pod \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\" (UID: \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.409976 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-operator-scripts\") pod \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\" (UID: \"86e10f1b-6bf7-4a69-b49d-b360c73a5a65\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410035 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmdvj\" (UniqueName: \"kubernetes.io/projected/c3cfb075-5fb9-4769-be33-338ef93623d2-kube-api-access-cmdvj\") pod \"c3cfb075-5fb9-4769-be33-338ef93623d2\" (UID: \"c3cfb075-5fb9-4769-be33-338ef93623d2\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410101 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-operator-scripts\") pod \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\" (UID: \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410126 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9bzk\" (UniqueName: \"kubernetes.io/projected/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-kube-api-access-j9bzk\") pod \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\" (UID: \"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410206 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3cfb075-5fb9-4769-be33-338ef93623d2-operator-scripts\") pod \"c3cfb075-5fb9-4769-be33-338ef93623d2\" (UID: \"c3cfb075-5fb9-4769-be33-338ef93623d2\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410285 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dql6\" (UniqueName: \"kubernetes.io/projected/dc011d48-6711-420d-911f-ffda06687982-kube-api-access-8dql6\") pod \"dc011d48-6711-420d-911f-ffda06687982\" (UID: \"dc011d48-6711-420d-911f-ffda06687982\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410333 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc011d48-6711-420d-911f-ffda06687982-operator-scripts\") pod \"dc011d48-6711-420d-911f-ffda06687982\" (UID: \"dc011d48-6711-420d-911f-ffda06687982\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410390 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chstt\" (UniqueName: \"kubernetes.io/projected/8e7c10dc-5215-41dc-80b4-00bc47be99e8-kube-api-access-chstt\") pod \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\" (UID: \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410436 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e7c10dc-5215-41dc-80b4-00bc47be99e8-operator-scripts\") pod \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\" (UID: \"8e7c10dc-5215-41dc-80b4-00bc47be99e8\") "
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410665 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-swiftconf\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410739 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-etc-swift\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410771 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-scripts\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410810 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-combined-ca-bundle\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410893 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6swn\" (UniqueName: \"kubernetes.io/projected/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-kube-api-access-n6swn\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410919 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-dispersionconf\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.410952 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-ring-data-devices\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.411460 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "86e10f1b-6bf7-4a69-b49d-b360c73a5a65" (UID: "86e10f1b-6bf7-4a69-b49d-b360c73a5a65"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.412536 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-kube-api-access-rpjjr" (OuterVolumeSpecName: "kube-api-access-rpjjr") pod "86e10f1b-6bf7-4a69-b49d-b360c73a5a65" (UID: "86e10f1b-6bf7-4a69-b49d-b360c73a5a65"). InnerVolumeSpecName "kube-api-access-rpjjr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.413199 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc011d48-6711-420d-911f-ffda06687982-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dc011d48-6711-420d-911f-ffda06687982" (UID: "dc011d48-6711-420d-911f-ffda06687982"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.416797 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3cfb075-5fb9-4769-be33-338ef93623d2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c3cfb075-5fb9-4769-be33-338ef93623d2" (UID: "c3cfb075-5fb9-4769-be33-338ef93623d2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.419009 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4ce6b227-ed6f-44d8-b9d1-e906bd3457fe" (UID: "4ce6b227-ed6f-44d8-b9d1-e906bd3457fe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.419553 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e7c10dc-5215-41dc-80b4-00bc47be99e8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8e7c10dc-5215-41dc-80b4-00bc47be99e8" (UID: "8e7c10dc-5215-41dc-80b4-00bc47be99e8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.421692 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc011d48-6711-420d-911f-ffda06687982-kube-api-access-8dql6" (OuterVolumeSpecName: "kube-api-access-8dql6") pod "dc011d48-6711-420d-911f-ffda06687982" (UID: "dc011d48-6711-420d-911f-ffda06687982"). InnerVolumeSpecName "kube-api-access-8dql6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.424200 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-kube-api-access-j9bzk" (OuterVolumeSpecName: "kube-api-access-j9bzk") pod "4ce6b227-ed6f-44d8-b9d1-e906bd3457fe" (UID: "4ce6b227-ed6f-44d8-b9d1-e906bd3457fe"). InnerVolumeSpecName "kube-api-access-j9bzk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.425166 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e7c10dc-5215-41dc-80b4-00bc47be99e8-kube-api-access-chstt" (OuterVolumeSpecName: "kube-api-access-chstt") pod "8e7c10dc-5215-41dc-80b4-00bc47be99e8" (UID: "8e7c10dc-5215-41dc-80b4-00bc47be99e8"). InnerVolumeSpecName "kube-api-access-chstt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.426048 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3cfb075-5fb9-4769-be33-338ef93623d2-kube-api-access-cmdvj" (OuterVolumeSpecName: "kube-api-access-cmdvj") pod "c3cfb075-5fb9-4769-be33-338ef93623d2" (UID: "c3cfb075-5fb9-4769-be33-338ef93623d2"). InnerVolumeSpecName "kube-api-access-cmdvj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.454097 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-k8npv" event={"ID":"c3cfb075-5fb9-4769-be33-338ef93623d2","Type":"ContainerDied","Data":"8f9238a3aa7cb710f6e8e3b1b4e5d29b7816df1427632a8b35552d16ea07d478"}
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.454146 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f9238a3aa7cb710f6e8e3b1b4e5d29b7816df1427632a8b35552d16ea07d478"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.454226 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-k8npv"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.479046 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b435-account-create-update-fcfpr" event={"ID":"86e10f1b-6bf7-4a69-b49d-b360c73a5a65","Type":"ContainerDied","Data":"ca0dd00b153b26e6b91611cf7287124304bf924d7d46fc4970f0baf2bf184a69"}
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.479084 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca0dd00b153b26e6b91611cf7287124304bf924d7d46fc4970f0baf2bf184a69"
Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.479142 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b435-account-create-update-fcfpr"
Need to start a new one" pod="openstack/glance-1cf5-account-create-update-tjktc" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.512549 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wfsm8" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.515797 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1cf5-account-create-update-tjktc" event={"ID":"dc011d48-6711-420d-911f-ffda06687982","Type":"ContainerDied","Data":"c08bf59aa432172275d57df3a0d4fa22e84b3c6123fda5eeabb1819c5ce62f45"} Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.515883 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c08bf59aa432172275d57df3a0d4fa22e84b3c6123fda5eeabb1819c5ce62f45" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.515906 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wfsm8" event={"ID":"8e7c10dc-5215-41dc-80b4-00bc47be99e8","Type":"ContainerDied","Data":"98f9d24b32b4b3e1fef828963fb3e97a22e49aa3fb820e8156929fa290b29132"} Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.515933 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98f9d24b32b4b3e1fef828963fb3e97a22e49aa3fb820e8156929fa290b29132" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.516941 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6swn\" (UniqueName: \"kubernetes.io/projected/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-kube-api-access-n6swn\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.516972 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-dispersionconf\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517028 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-ring-data-devices\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517119 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-swiftconf\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517193 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-etc-swift\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517251 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-scripts\") pod \"swift-ring-rebalance-ctgvx\" (UID: 
\"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517276 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-combined-ca-bundle\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517427 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chstt\" (UniqueName: \"kubernetes.io/projected/8e7c10dc-5215-41dc-80b4-00bc47be99e8-kube-api-access-chstt\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517445 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e7c10dc-5215-41dc-80b4-00bc47be99e8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517456 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpjjr\" (UniqueName: \"kubernetes.io/projected/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-kube-api-access-rpjjr\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517487 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/86e10f1b-6bf7-4a69-b49d-b360c73a5a65-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517499 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmdvj\" (UniqueName: \"kubernetes.io/projected/c3cfb075-5fb9-4769-be33-338ef93623d2-kube-api-access-cmdvj\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517511 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517526 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9bzk\" (UniqueName: \"kubernetes.io/projected/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe-kube-api-access-j9bzk\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517538 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3cfb075-5fb9-4769-be33-338ef93623d2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517571 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dql6\" (UniqueName: \"kubernetes.io/projected/dc011d48-6711-420d-911f-ffda06687982-kube-api-access-8dql6\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.517583 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc011d48-6711-420d-911f-ffda06687982-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.518413 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-ring-data-devices\") pod \"swift-ring-rebalance-ctgvx\" (UID: 
\"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.519310 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-scripts\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.529051 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-etc-swift\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.529490 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-4a12-account-create-update-l49lt" event={"ID":"0d2ae321-a5cb-4018-8899-7de265e16c0f","Type":"ContainerDied","Data":"df16ae1c74ddb9ed736cbe952f4810536ecbb838b0b8e8abc09954702716acd7"} Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.529576 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df16ae1c74ddb9ed736cbe952f4810536ecbb838b0b8e8abc09954702716acd7" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.529778 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-4a12-account-create-update-l49lt" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.538552 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-swiftconf\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.555297 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-dispersionconf\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.555404 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6swn\" (UniqueName: \"kubernetes.io/projected/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-kube-api-access-n6swn\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.560211 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-combined-ca-bundle\") pod \"swift-ring-rebalance-ctgvx\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.560354 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.582023 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-dz2hg" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.590483 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-dz2hg" event={"ID":"4ce6b227-ed6f-44d8-b9d1-e906bd3457fe","Type":"ContainerDied","Data":"320c4c4a950f10525900bd9fc336ca7ad418222e5db5eb49add79e4176ff150e"} Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.590532 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="320c4c4a950f10525900bd9fc336ca7ad418222e5db5eb49add79e4176ff150e" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.649800 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.730013 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-8lchs"] Jan 20 20:04:53 crc kubenswrapper[4948]: I0120 20:04:53.745165 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-8lchs"] Jan 20 20:04:54 crc kubenswrapper[4948]: I0120 20:04:54.372773 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-ctgvx"] Jan 20 20:04:54 crc kubenswrapper[4948]: W0120 20:04:54.385223 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce6ef66a_e0b9_4dbf_9c1b_262e952e9845.slice/crio-0f9e8af3f8cf01eb352886dfbc0173a52d81018d10c342fee20365367e8413c7 WatchSource:0}: Error finding container 0f9e8af3f8cf01eb352886dfbc0173a52d81018d10c342fee20365367e8413c7: Status 404 returned error can't find the container with id 0f9e8af3f8cf01eb352886dfbc0173a52d81018d10c342fee20365367e8413c7 Jan 20 20:04:54 crc kubenswrapper[4948]: I0120 20:04:54.580031 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acd6e216-4534-4c7a-ab49-94213536db2c" path="/var/lib/kubelet/pods/acd6e216-4534-4c7a-ab49-94213536db2c/volumes" Jan 20 20:04:54 crc kubenswrapper[4948]: I0120 20:04:54.589576 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-ctgvx" event={"ID":"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845","Type":"ContainerStarted","Data":"0f9e8af3f8cf01eb352886dfbc0173a52d81018d10c342fee20365367e8413c7"} Jan 20 20:04:55 crc kubenswrapper[4948]: I0120 20:04:55.032246 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:55 crc kubenswrapper[4948]: I0120 20:04:55.033163 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.089516 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-p8b7f" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="registry-server" probeResult="failure" output=< Jan 20 20:04:56 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 20:04:56 crc kubenswrapper[4948]: > Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.239323 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-fdwn2"] Jan 20 20:04:56 crc kubenswrapper[4948]: E0120 20:04:56.239732 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc011d48-6711-420d-911f-ffda06687982" containerName="mariadb-account-create-update" Jan 20 20:04:56 
crc kubenswrapper[4948]: I0120 20:04:56.239751 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc011d48-6711-420d-911f-ffda06687982" containerName="mariadb-account-create-update" Jan 20 20:04:56 crc kubenswrapper[4948]: E0120 20:04:56.239776 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3cfb075-5fb9-4769-be33-338ef93623d2" containerName="mariadb-database-create" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.239783 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3cfb075-5fb9-4769-be33-338ef93623d2" containerName="mariadb-database-create" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.239947 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc011d48-6711-420d-911f-ffda06687982" containerName="mariadb-account-create-update" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.239966 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3cfb075-5fb9-4769-be33-338ef93623d2" containerName="mariadb-database-create" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.240552 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.244440 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-96n9r" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.244782 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.267913 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-fdwn2"] Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.398102 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-config-data\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.398150 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57xcx\" (UniqueName: \"kubernetes.io/projected/d96cb8cd-dfa3-4d70-af44-be9627945b5f-kube-api-access-57xcx\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.398191 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-combined-ca-bundle\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.398290 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-db-sync-config-data\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.499688 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-db-sync-config-data\") pod 
\"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.499846 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-config-data\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.499882 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57xcx\" (UniqueName: \"kubernetes.io/projected/d96cb8cd-dfa3-4d70-af44-be9627945b5f-kube-api-access-57xcx\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.499920 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-combined-ca-bundle\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.508236 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-combined-ca-bundle\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.509796 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-db-sync-config-data\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.532320 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-config-data\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.561056 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57xcx\" (UniqueName: \"kubernetes.io/projected/d96cb8cd-dfa3-4d70-af44-be9627945b5f-kube-api-access-57xcx\") pod \"glance-db-sync-fdwn2\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:56 crc kubenswrapper[4948]: I0120 20:04:56.576068 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-fdwn2" Jan 20 20:04:57 crc kubenswrapper[4948]: I0120 20:04:57.211110 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:04:57 crc kubenswrapper[4948]: E0120 20:04:57.211312 4948 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 20:04:57 crc kubenswrapper[4948]: E0120 20:04:57.211340 4948 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 20:04:57 crc kubenswrapper[4948]: E0120 20:04:57.211395 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift podName:253a8193-904e-4f62-adbe-597b97b4fd30 nodeName:}" failed. No retries permitted until 2026-01-20 20:05:05.211378221 +0000 UTC m=+933.162103190 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift") pod "swift-storage-0" (UID: "253a8193-904e-4f62-adbe-597b97b4fd30") : configmap "swift-ring-files" not found Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.557920 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.622132 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-4ckg7"] Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.622664 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" podUID="eacc8f3b-677c-4e7c-b507-a885147a2448" containerName="dnsmasq-dns" containerID="cri-o://e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5" gracePeriod=10 Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.741953 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-spj97"] Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.743266 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-spj97" Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.748541 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.754236 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-spj97"] Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.894534 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbzkt\" (UniqueName: \"kubernetes.io/projected/aead4ceb-154b-4822-b17a-46313fc78eaf-kube-api-access-cbzkt\") pod \"root-account-create-update-spj97\" (UID: \"aead4ceb-154b-4822-b17a-46313fc78eaf\") " pod="openstack/root-account-create-update-spj97" Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.894622 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aead4ceb-154b-4822-b17a-46313fc78eaf-operator-scripts\") pod \"root-account-create-update-spj97\" (UID: \"aead4ceb-154b-4822-b17a-46313fc78eaf\") " pod="openstack/root-account-create-update-spj97" Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.995960 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbzkt\" (UniqueName: \"kubernetes.io/projected/aead4ceb-154b-4822-b17a-46313fc78eaf-kube-api-access-cbzkt\") pod \"root-account-create-update-spj97\" (UID: \"aead4ceb-154b-4822-b17a-46313fc78eaf\") " pod="openstack/root-account-create-update-spj97" Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.996070 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aead4ceb-154b-4822-b17a-46313fc78eaf-operator-scripts\") pod \"root-account-create-update-spj97\" (UID: \"aead4ceb-154b-4822-b17a-46313fc78eaf\") " pod="openstack/root-account-create-update-spj97" Jan 20 20:04:58 crc kubenswrapper[4948]: I0120 20:04:58.997242 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aead4ceb-154b-4822-b17a-46313fc78eaf-operator-scripts\") pod \"root-account-create-update-spj97\" (UID: \"aead4ceb-154b-4822-b17a-46313fc78eaf\") " pod="openstack/root-account-create-update-spj97" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.023644 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbzkt\" (UniqueName: \"kubernetes.io/projected/aead4ceb-154b-4822-b17a-46313fc78eaf-kube-api-access-cbzkt\") pod \"root-account-create-update-spj97\" (UID: \"aead4ceb-154b-4822-b17a-46313fc78eaf\") " pod="openstack/root-account-create-update-spj97" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.079725 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-spj97" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.442927 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.606885 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-dns-svc\") pod \"eacc8f3b-677c-4e7c-b507-a885147a2448\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.606996 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-sb\") pod \"eacc8f3b-677c-4e7c-b507-a885147a2448\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.607056 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gd64\" (UniqueName: \"kubernetes.io/projected/eacc8f3b-677c-4e7c-b507-a885147a2448-kube-api-access-9gd64\") pod \"eacc8f3b-677c-4e7c-b507-a885147a2448\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.607153 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-config\") pod \"eacc8f3b-677c-4e7c-b507-a885147a2448\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.607182 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-nb\") pod \"eacc8f3b-677c-4e7c-b507-a885147a2448\" (UID: \"eacc8f3b-677c-4e7c-b507-a885147a2448\") " Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.613984 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eacc8f3b-677c-4e7c-b507-a885147a2448-kube-api-access-9gd64" (OuterVolumeSpecName: "kube-api-access-9gd64") pod "eacc8f3b-677c-4e7c-b507-a885147a2448" (UID: "eacc8f3b-677c-4e7c-b507-a885147a2448"). InnerVolumeSpecName "kube-api-access-9gd64". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.670875 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eacc8f3b-677c-4e7c-b507-a885147a2448" (UID: "eacc8f3b-677c-4e7c-b507-a885147a2448"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.673128 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-ctgvx" event={"ID":"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845","Type":"ContainerStarted","Data":"dab32a5d3c9cd2c80c9e93e11d9a18766fa9686ece61aa0e3c1fcc3405e973ff"} Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.683912 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "eacc8f3b-677c-4e7c-b507-a885147a2448" (UID: "eacc8f3b-677c-4e7c-b507-a885147a2448"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.695002 4948 generic.go:334] "Generic (PLEG): container finished" podID="eacc8f3b-677c-4e7c-b507-a885147a2448" containerID="e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5" exitCode=0 Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.695049 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" event={"ID":"eacc8f3b-677c-4e7c-b507-a885147a2448","Type":"ContainerDied","Data":"e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5"} Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.695078 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" event={"ID":"eacc8f3b-677c-4e7c-b507-a885147a2448","Type":"ContainerDied","Data":"b5d1051970d2eba069ac2261886125692d7caa4cfc7f98f93424ec2b4bf32ccf"} Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.695097 4948 scope.go:117] "RemoveContainer" containerID="e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.695274 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-4ckg7" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.706035 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-ctgvx" podStartSLOduration=1.924102363 podStartE2EDuration="6.706016218s" podCreationTimestamp="2026-01-20 20:04:53 +0000 UTC" firstStartedPulling="2026-01-20 20:04:54.387350093 +0000 UTC m=+922.338075062" lastFinishedPulling="2026-01-20 20:04:59.169263948 +0000 UTC m=+927.119988917" observedRunningTime="2026-01-20 20:04:59.705948626 +0000 UTC m=+927.656673595" watchObservedRunningTime="2026-01-20 20:04:59.706016218 +0000 UTC m=+927.656741177" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.712807 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.712833 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.712844 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gd64\" (UniqueName: \"kubernetes.io/projected/eacc8f3b-677c-4e7c-b507-a885147a2448-kube-api-access-9gd64\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.718812 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "eacc8f3b-677c-4e7c-b507-a885147a2448" (UID: "eacc8f3b-677c-4e7c-b507-a885147a2448"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.736079 4948 scope.go:117] "RemoveContainer" containerID="e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.756409 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-config" (OuterVolumeSpecName: "config") pod "eacc8f3b-677c-4e7c-b507-a885147a2448" (UID: "eacc8f3b-677c-4e7c-b507-a885147a2448"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.762894 4948 scope.go:117] "RemoveContainer" containerID="e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5" Jan 20 20:04:59 crc kubenswrapper[4948]: E0120 20:04:59.763410 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5\": container with ID starting with e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5 not found: ID does not exist" containerID="e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.763460 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5"} err="failed to get container status \"e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5\": rpc error: code = NotFound desc = could not find container \"e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5\": container with ID starting with e257846082d3f5ac638adc95530e61cc77d68bc8ae621c325706c08bea66a7c5 not found: ID does not exist" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.763499 4948 scope.go:117] "RemoveContainer" containerID="e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230" Jan 20 20:04:59 crc kubenswrapper[4948]: E0120 20:04:59.763876 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230\": container with ID starting with e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230 not found: ID does not exist" containerID="e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.763925 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230"} err="failed to get container status \"e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230\": rpc error: code = NotFound desc = could not find container \"e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230\": container with ID starting with e1db9f962ab88865e72cd643186b5ad77ee1766546823a317a4ae7b675e1f230 not found: ID does not exist" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.814850 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.814886 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/eacc8f3b-677c-4e7c-b507-a885147a2448-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:04:59 crc kubenswrapper[4948]: W0120 20:04:59.876528 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaead4ceb_154b_4822_b17a_46313fc78eaf.slice/crio-6ecda88da118d0fbdc82e8cba52507bec6ac5b0e91de0e99ce8d7c72c4138186 WatchSource:0}: Error finding container 6ecda88da118d0fbdc82e8cba52507bec6ac5b0e91de0e99ce8d7c72c4138186: Status 404 returned error can't find the container with id 6ecda88da118d0fbdc82e8cba52507bec6ac5b0e91de0e99ce8d7c72c4138186 Jan 20 20:04:59 crc kubenswrapper[4948]: I0120 20:04:59.876815 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-spj97"] Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.033282 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-fdwn2"] Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.059678 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-4ckg7"] Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.071398 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-4ckg7"] Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.363619 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.579501 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eacc8f3b-677c-4e7c-b507-a885147a2448" path="/var/lib/kubelet/pods/eacc8f3b-677c-4e7c-b507-a885147a2448/volumes" Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.703964 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-fdwn2" event={"ID":"d96cb8cd-dfa3-4d70-af44-be9627945b5f","Type":"ContainerStarted","Data":"de457b35af9759c6a88ff8065b022d29ab38b2e0f7b211d2f321e65f604a8b14"} Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.706127 4948 generic.go:334] "Generic (PLEG): container finished" podID="aead4ceb-154b-4822-b17a-46313fc78eaf" containerID="ce3bec0a8712e92a4b3d09259b2b9f48aea48bbcb17bba61a24bd447edd4bd71" exitCode=0 Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.707209 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-spj97" event={"ID":"aead4ceb-154b-4822-b17a-46313fc78eaf","Type":"ContainerDied","Data":"ce3bec0a8712e92a4b3d09259b2b9f48aea48bbcb17bba61a24bd447edd4bd71"} Jan 20 20:05:00 crc kubenswrapper[4948]: I0120 20:05:00.707243 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-spj97" event={"ID":"aead4ceb-154b-4822-b17a-46313fc78eaf","Type":"ContainerStarted","Data":"6ecda88da118d0fbdc82e8cba52507bec6ac5b0e91de0e99ce8d7c72c4138186"} Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.076073 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-spj97" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.122045 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xlcmv"] Jan 20 20:05:02 crc kubenswrapper[4948]: E0120 20:05:02.122451 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eacc8f3b-677c-4e7c-b507-a885147a2448" containerName="dnsmasq-dns" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.122472 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="eacc8f3b-677c-4e7c-b507-a885147a2448" containerName="dnsmasq-dns" Jan 20 20:05:02 crc kubenswrapper[4948]: E0120 20:05:02.122505 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aead4ceb-154b-4822-b17a-46313fc78eaf" containerName="mariadb-account-create-update" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.122516 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="aead4ceb-154b-4822-b17a-46313fc78eaf" containerName="mariadb-account-create-update" Jan 20 20:05:02 crc kubenswrapper[4948]: E0120 20:05:02.122526 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eacc8f3b-677c-4e7c-b507-a885147a2448" containerName="init" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.122534 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="eacc8f3b-677c-4e7c-b507-a885147a2448" containerName="init" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.122693 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="eacc8f3b-677c-4e7c-b507-a885147a2448" containerName="dnsmasq-dns" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.122739 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="aead4ceb-154b-4822-b17a-46313fc78eaf" containerName="mariadb-account-create-update" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.124065 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.140032 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xlcmv"] Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.162932 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqljj\" (UniqueName: \"kubernetes.io/projected/8332c140-d061-47f6-b309-973a562bccc6-kube-api-access-zqljj\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.163057 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-catalog-content\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.163084 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-utilities\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.258103 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-hpg27" podUID="46328967-e69a-4d46-86d6-ba1af248c8f2" containerName="ovn-controller" probeResult="failure" output=< Jan 20 20:05:02 crc kubenswrapper[4948]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 20 20:05:02 crc kubenswrapper[4948]: > Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.264305 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aead4ceb-154b-4822-b17a-46313fc78eaf-operator-scripts\") pod \"aead4ceb-154b-4822-b17a-46313fc78eaf\" (UID: \"aead4ceb-154b-4822-b17a-46313fc78eaf\") " Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.264403 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbzkt\" (UniqueName: \"kubernetes.io/projected/aead4ceb-154b-4822-b17a-46313fc78eaf-kube-api-access-cbzkt\") pod \"aead4ceb-154b-4822-b17a-46313fc78eaf\" (UID: \"aead4ceb-154b-4822-b17a-46313fc78eaf\") " Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.264604 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-catalog-content\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.264627 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-utilities\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.264735 4948 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-zqljj\" (UniqueName: \"kubernetes.io/projected/8332c140-d061-47f6-b309-973a562bccc6-kube-api-access-zqljj\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.264842 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aead4ceb-154b-4822-b17a-46313fc78eaf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aead4ceb-154b-4822-b17a-46313fc78eaf" (UID: "aead4ceb-154b-4822-b17a-46313fc78eaf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.265177 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-catalog-content\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.265184 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-utilities\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.279063 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aead4ceb-154b-4822-b17a-46313fc78eaf-kube-api-access-cbzkt" (OuterVolumeSpecName: "kube-api-access-cbzkt") pod "aead4ceb-154b-4822-b17a-46313fc78eaf" (UID: "aead4ceb-154b-4822-b17a-46313fc78eaf"). InnerVolumeSpecName "kube-api-access-cbzkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.284510 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqljj\" (UniqueName: \"kubernetes.io/projected/8332c140-d061-47f6-b309-973a562bccc6-kube-api-access-zqljj\") pod \"certified-operators-xlcmv\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.368041 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbzkt\" (UniqueName: \"kubernetes.io/projected/aead4ceb-154b-4822-b17a-46313fc78eaf-kube-api-access-cbzkt\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.368076 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aead4ceb-154b-4822-b17a-46313fc78eaf-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.452256 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.762310 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-spj97" event={"ID":"aead4ceb-154b-4822-b17a-46313fc78eaf","Type":"ContainerDied","Data":"6ecda88da118d0fbdc82e8cba52507bec6ac5b0e91de0e99ce8d7c72c4138186"} Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.762618 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ecda88da118d0fbdc82e8cba52507bec6ac5b0e91de0e99ce8d7c72c4138186" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.762694 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-spj97" Jan 20 20:05:02 crc kubenswrapper[4948]: I0120 20:05:02.832213 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xlcmv"] Jan 20 20:05:03 crc kubenswrapper[4948]: I0120 20:05:03.784950 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xlcmv" event={"ID":"8332c140-d061-47f6-b309-973a562bccc6","Type":"ContainerStarted","Data":"adc48e0aac3aaa9f5430ca70ea00ca35266e502b97acd4f84031820abaa83414"} Jan 20 20:05:04 crc kubenswrapper[4948]: I0120 20:05:04.792863 4948 generic.go:334] "Generic (PLEG): container finished" podID="8332c140-d061-47f6-b309-973a562bccc6" containerID="254a7a439497af193dd6aace560c84bbeaaf2d924a9cd29abf9ff5dc361d2732" exitCode=0 Jan 20 20:05:04 crc kubenswrapper[4948]: I0120 20:05:04.792954 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xlcmv" event={"ID":"8332c140-d061-47f6-b309-973a562bccc6","Type":"ContainerDied","Data":"254a7a439497af193dd6aace560c84bbeaaf2d924a9cd29abf9ff5dc361d2732"} Jan 20 20:05:04 crc kubenswrapper[4948]: I0120 20:05:04.797479 4948 generic.go:334] "Generic (PLEG): container finished" podID="e243433b-5932-4d3d-a280-b7999d49e1ec" containerID="eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce" exitCode=0 Jan 20 20:05:04 crc kubenswrapper[4948]: I0120 20:05:04.797543 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e243433b-5932-4d3d-a280-b7999d49e1ec","Type":"ContainerDied","Data":"eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce"} Jan 20 20:05:04 crc kubenswrapper[4948]: I0120 20:05:04.801902 4948 generic.go:334] "Generic (PLEG): container finished" podID="98083b85-e2b1-48e2-82f9-c71019aa2475" containerID="88ea89f84b7617f501ddbb4b9afb6561e4fd047f7d7e5577d0b84b4bdbfe0e71" exitCode=0 Jan 20 20:05:04 crc kubenswrapper[4948]: I0120 20:05:04.801940 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"98083b85-e2b1-48e2-82f9-c71019aa2475","Type":"ContainerDied","Data":"88ea89f84b7617f501ddbb4b9afb6561e4fd047f7d7e5577d0b84b4bdbfe0e71"} Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.103105 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.191698 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.277620 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:05:05 crc kubenswrapper[4948]: E0120 20:05:05.277820 4948 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 20 20:05:05 crc kubenswrapper[4948]: E0120 20:05:05.277836 4948 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 20 20:05:05 crc kubenswrapper[4948]: E0120 20:05:05.277890 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift podName:253a8193-904e-4f62-adbe-597b97b4fd30 nodeName:}" failed. No retries permitted until 2026-01-20 20:05:21.277871013 +0000 UTC m=+949.228595972 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift") pod "swift-storage-0" (UID: "253a8193-904e-4f62-adbe-597b97b4fd30") : configmap "swift-ring-files" not found Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.831813 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e243433b-5932-4d3d-a280-b7999d49e1ec","Type":"ContainerStarted","Data":"d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9"} Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.832341 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.838259 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"98083b85-e2b1-48e2-82f9-c71019aa2475","Type":"ContainerStarted","Data":"1d5035085a041f76275ed70c0ab7e14cebb8b68fc62dcc8a4d27ec6b7211db0d"} Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.839066 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.844654 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xlcmv" event={"ID":"8332c140-d061-47f6-b309-973a562bccc6","Type":"ContainerStarted","Data":"392f0628c298ef4a754588adaef8611274577fc86cd7bd7cd091a9a27105b1cb"} Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.906890 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.648682963 podStartE2EDuration="1m14.906869688s" podCreationTimestamp="2026-01-20 20:03:51 +0000 UTC" firstStartedPulling="2026-01-20 20:03:53.88704083 +0000 UTC m=+861.837765799" lastFinishedPulling="2026-01-20 20:04:31.145227555 +0000 UTC m=+899.095952524" observedRunningTime="2026-01-20 20:05:05.901466445 +0000 UTC m=+933.852191414" watchObservedRunningTime="2026-01-20 20:05:05.906869688 +0000 UTC m=+933.857594657" Jan 20 20:05:05 crc kubenswrapper[4948]: I0120 20:05:05.953204 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371960.901592 podStartE2EDuration="1m15.953184001s" podCreationTimestamp="2026-01-20 20:03:50 +0000 UTC" firstStartedPulling="2026-01-20 20:03:53.08862726 +0000 UTC m=+861.039352229" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 
20:05:05.944209527 +0000 UTC m=+933.894934496" watchObservedRunningTime="2026-01-20 20:05:05.953184001 +0000 UTC m=+933.903908970" Jan 20 20:05:06 crc kubenswrapper[4948]: I0120 20:05:06.904770 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p8b7f"] Jan 20 20:05:06 crc kubenswrapper[4948]: I0120 20:05:06.912781 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-p8b7f" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="registry-server" containerID="cri-o://fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4" gracePeriod=2 Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.018979 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.064565 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-dgkh9" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.293538 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-hpg27" podUID="46328967-e69a-4d46-86d6-ba1af248c8f2" containerName="ovn-controller" probeResult="failure" output=< Jan 20 20:05:07 crc kubenswrapper[4948]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 20 20:05:07 crc kubenswrapper[4948]: > Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.354851 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-hpg27-config-l26bm"] Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.355846 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.361138 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.373236 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hpg27-config-l26bm"] Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.412635 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.412696 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4gg2\" (UniqueName: \"kubernetes.io/projected/16ff4b98-5002-4a48-9e41-8081b830c8eb-kube-api-access-k4gg2\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.412787 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-additional-scripts\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.412817 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run-ovn\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.412861 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-log-ovn\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.412901 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-scripts\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.515759 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-log-ovn\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.516100 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-scripts\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.516300 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.516443 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4gg2\" (UniqueName: \"kubernetes.io/projected/16ff4b98-5002-4a48-9e41-8081b830c8eb-kube-api-access-k4gg2\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.516613 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-additional-scripts\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.516746 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run-ovn\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: 
I0120 20:05:07.517083 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.517186 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-log-ovn\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.519009 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-scripts\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.519075 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run-ovn\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.541620 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-additional-scripts\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.542713 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4gg2\" (UniqueName: \"kubernetes.io/projected/16ff4b98-5002-4a48-9e41-8081b830c8eb-kube-api-access-k4gg2\") pod \"ovn-controller-hpg27-config-l26bm\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.687053 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.689173 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.823928 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-catalog-content\") pod \"896974b3-7b54-41b4-985e-9bfa9849f260\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.824210 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-utilities\") pod \"896974b3-7b54-41b4-985e-9bfa9849f260\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.824264 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5b4h\" (UniqueName: \"kubernetes.io/projected/896974b3-7b54-41b4-985e-9bfa9849f260-kube-api-access-z5b4h\") pod \"896974b3-7b54-41b4-985e-9bfa9849f260\" (UID: \"896974b3-7b54-41b4-985e-9bfa9849f260\") " Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.829560 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-utilities" (OuterVolumeSpecName: "utilities") pod "896974b3-7b54-41b4-985e-9bfa9849f260" (UID: "896974b3-7b54-41b4-985e-9bfa9849f260"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.833151 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/896974b3-7b54-41b4-985e-9bfa9849f260-kube-api-access-z5b4h" (OuterVolumeSpecName: "kube-api-access-z5b4h") pod "896974b3-7b54-41b4-985e-9bfa9849f260" (UID: "896974b3-7b54-41b4-985e-9bfa9849f260"). InnerVolumeSpecName "kube-api-access-z5b4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.910613 4948 generic.go:334] "Generic (PLEG): container finished" podID="8332c140-d061-47f6-b309-973a562bccc6" containerID="392f0628c298ef4a754588adaef8611274577fc86cd7bd7cd091a9a27105b1cb" exitCode=0 Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.910720 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xlcmv" event={"ID":"8332c140-d061-47f6-b309-973a562bccc6","Type":"ContainerDied","Data":"392f0628c298ef4a754588adaef8611274577fc86cd7bd7cd091a9a27105b1cb"} Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.926123 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.926150 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5b4h\" (UniqueName: \"kubernetes.io/projected/896974b3-7b54-41b4-985e-9bfa9849f260-kube-api-access-z5b4h\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.926639 4948 generic.go:334] "Generic (PLEG): container finished" podID="896974b3-7b54-41b4-985e-9bfa9849f260" containerID="fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4" exitCode=0 Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.926957 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-p8b7f" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.927081 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8b7f" event={"ID":"896974b3-7b54-41b4-985e-9bfa9849f260","Type":"ContainerDied","Data":"fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4"} Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.927137 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-p8b7f" event={"ID":"896974b3-7b54-41b4-985e-9bfa9849f260","Type":"ContainerDied","Data":"0d87a4c0739f4110cda46611883a552739c9cabccdf123bdac9ed62fe68eb4bd"} Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.927160 4948 scope.go:117] "RemoveContainer" containerID="fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4" Jan 20 20:05:07 crc kubenswrapper[4948]: I0120 20:05:07.958740 4948 scope.go:117] "RemoveContainer" containerID="c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.073486 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "896974b3-7b54-41b4-985e-9bfa9849f260" (UID: "896974b3-7b54-41b4-985e-9bfa9849f260"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.080087 4948 scope.go:117] "RemoveContainer" containerID="99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.116996 4948 scope.go:117] "RemoveContainer" containerID="fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4" Jan 20 20:05:08 crc kubenswrapper[4948]: E0120 20:05:08.120657 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4\": container with ID starting with fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4 not found: ID does not exist" containerID="fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.120693 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4"} err="failed to get container status \"fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4\": rpc error: code = NotFound desc = could not find container \"fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4\": container with ID starting with fd09aa5ef14e6206f653789eac8e2d02ac1dd27e1362c5d3e714d777daed3db4 not found: ID does not exist" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.120744 4948 scope.go:117] "RemoveContainer" containerID="c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49" Jan 20 20:05:08 crc kubenswrapper[4948]: E0120 20:05:08.123794 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49\": container with ID starting with c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49 not found: ID does not exist" 
containerID="c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.123835 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49"} err="failed to get container status \"c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49\": rpc error: code = NotFound desc = could not find container \"c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49\": container with ID starting with c589e7298d52c1f43edca2db7f705a6baf2aa0eafde8352f27475f751fd72c49 not found: ID does not exist" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.123861 4948 scope.go:117] "RemoveContainer" containerID="99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2" Jan 20 20:05:08 crc kubenswrapper[4948]: E0120 20:05:08.124560 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2\": container with ID starting with 99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2 not found: ID does not exist" containerID="99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.124587 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2"} err="failed to get container status \"99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2\": rpc error: code = NotFound desc = could not find container \"99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2\": container with ID starting with 99b1bdad3bcdd5e813356459ce9e9d0465fd7d8b8a98c59ede4d65ce638a1bb2 not found: ID does not exist" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.130867 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/896974b3-7b54-41b4-985e-9bfa9849f260-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.278765 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-p8b7f"] Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.292971 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-p8b7f"] Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.535973 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hpg27-config-l26bm"] Jan 20 20:05:08 crc kubenswrapper[4948]: W0120 20:05:08.543265 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16ff4b98_5002_4a48_9e41_8081b830c8eb.slice/crio-81df4da363f1aa4e90c78782f6f0b30140e2101f77b7fea0d6d916c21bbe1dd8 WatchSource:0}: Error finding container 81df4da363f1aa4e90c78782f6f0b30140e2101f77b7fea0d6d916c21bbe1dd8: Status 404 returned error can't find the container with id 81df4da363f1aa4e90c78782f6f0b30140e2101f77b7fea0d6d916c21bbe1dd8 Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.599778 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" path="/var/lib/kubelet/pods/896974b3-7b54-41b4-985e-9bfa9849f260/volumes" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.942966 4948 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xlcmv" event={"ID":"8332c140-d061-47f6-b309-973a562bccc6","Type":"ContainerStarted","Data":"8646db1d9698dcd48767d21f65a6826c62e5129b7bd821f15967d3329288d0a3"} Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.954823 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27-config-l26bm" event={"ID":"16ff4b98-5002-4a48-9e41-8081b830c8eb","Type":"ContainerStarted","Data":"81df4da363f1aa4e90c78782f6f0b30140e2101f77b7fea0d6d916c21bbe1dd8"} Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.976902 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xlcmv" podStartSLOduration=3.239352521 podStartE2EDuration="6.976876021s" podCreationTimestamp="2026-01-20 20:05:02 +0000 UTC" firstStartedPulling="2026-01-20 20:05:04.79601966 +0000 UTC m=+932.746744629" lastFinishedPulling="2026-01-20 20:05:08.53354316 +0000 UTC m=+936.484268129" observedRunningTime="2026-01-20 20:05:08.968170164 +0000 UTC m=+936.918895153" watchObservedRunningTime="2026-01-20 20:05:08.976876021 +0000 UTC m=+936.927600990" Jan 20 20:05:08 crc kubenswrapper[4948]: I0120 20:05:08.995002 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-hpg27-config-l26bm" podStartSLOduration=1.9949754039999998 podStartE2EDuration="1.994975404s" podCreationTimestamp="2026-01-20 20:05:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:08.98952766 +0000 UTC m=+936.940252629" watchObservedRunningTime="2026-01-20 20:05:08.994975404 +0000 UTC m=+936.945700373" Jan 20 20:05:09 crc kubenswrapper[4948]: I0120 20:05:09.967179 4948 generic.go:334] "Generic (PLEG): container finished" podID="16ff4b98-5002-4a48-9e41-8081b830c8eb" containerID="e212820504850ebcb9992e631d79fba8a0d64cf4d4a9aa6a634242539f0da7c9" exitCode=0 Jan 20 20:05:09 crc kubenswrapper[4948]: I0120 20:05:09.967236 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27-config-l26bm" event={"ID":"16ff4b98-5002-4a48-9e41-8081b830c8eb","Type":"ContainerDied","Data":"e212820504850ebcb9992e631d79fba8a0d64cf4d4a9aa6a634242539f0da7c9"} Jan 20 20:05:10 crc kubenswrapper[4948]: I0120 20:05:10.979758 4948 generic.go:334] "Generic (PLEG): container finished" podID="ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" containerID="dab32a5d3c9cd2c80c9e93e11d9a18766fa9686ece61aa0e3c1fcc3405e973ff" exitCode=0 Jan 20 20:05:10 crc kubenswrapper[4948]: I0120 20:05:10.979827 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-ctgvx" event={"ID":"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845","Type":"ContainerDied","Data":"dab32a5d3c9cd2c80c9e93e11d9a18766fa9686ece61aa0e3c1fcc3405e973ff"} Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.259056 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-hpg27" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.453514 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.453813 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.529042 4948 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.708337 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jmpg6"] Jan 20 20:05:12 crc kubenswrapper[4948]: E0120 20:05:12.711837 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="registry-server" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.711876 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="registry-server" Jan 20 20:05:12 crc kubenswrapper[4948]: E0120 20:05:12.711895 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="extract-utilities" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.711906 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="extract-utilities" Jan 20 20:05:12 crc kubenswrapper[4948]: E0120 20:05:12.711941 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="extract-content" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.711949 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="extract-content" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.712232 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="896974b3-7b54-41b4-985e-9bfa9849f260" containerName="registry-server" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.713566 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.742903 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jmpg6"] Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.835095 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-catalog-content\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.835200 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tjhl\" (UniqueName: \"kubernetes.io/projected/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-kube-api-access-2tjhl\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.835267 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-utilities\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.937189 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tjhl\" (UniqueName: \"kubernetes.io/projected/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-kube-api-access-2tjhl\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.937601 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-utilities\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.937751 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-catalog-content\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.938168 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-utilities\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.938279 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-catalog-content\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:12 crc kubenswrapper[4948]: I0120 20:05:12.960538 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2tjhl\" (UniqueName: \"kubernetes.io/projected/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-kube-api-access-2tjhl\") pod \"community-operators-jmpg6\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:13 crc kubenswrapper[4948]: I0120 20:05:13.030348 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:13 crc kubenswrapper[4948]: I0120 20:05:13.095550 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:14 crc kubenswrapper[4948]: I0120 20:05:14.894266 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xlcmv"] Jan 20 20:05:16 crc kubenswrapper[4948]: I0120 20:05:16.128290 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xlcmv" podUID="8332c140-d061-47f6-b309-973a562bccc6" containerName="registry-server" containerID="cri-o://8646db1d9698dcd48767d21f65a6826c62e5129b7bd821f15967d3329288d0a3" gracePeriod=2 Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.107610 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8njnt"] Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.110010 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.127493 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8njnt"] Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.142759 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-catalog-content\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.142862 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjr8c\" (UniqueName: \"kubernetes.io/projected/24ac2816-d915-48c3-b75a-3f866aa46a43-kube-api-access-rjr8c\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.143027 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-utilities\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.146754 4948 generic.go:334] "Generic (PLEG): container finished" podID="8332c140-d061-47f6-b309-973a562bccc6" containerID="8646db1d9698dcd48767d21f65a6826c62e5129b7bd821f15967d3329288d0a3" exitCode=0 Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.146849 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xlcmv" 
event={"ID":"8332c140-d061-47f6-b309-973a562bccc6","Type":"ContainerDied","Data":"8646db1d9698dcd48767d21f65a6826c62e5129b7bd821f15967d3329288d0a3"} Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.244806 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-utilities\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.244877 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-catalog-content\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.244936 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjr8c\" (UniqueName: \"kubernetes.io/projected/24ac2816-d915-48c3-b75a-3f866aa46a43-kube-api-access-rjr8c\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.245420 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-utilities\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.245464 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-catalog-content\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.268676 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjr8c\" (UniqueName: \"kubernetes.io/projected/24ac2816-d915-48c3-b75a-3f866aa46a43-kube-api-access-rjr8c\") pod \"redhat-marketplace-8njnt\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:17 crc kubenswrapper[4948]: I0120 20:05:17.429353 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.249751 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.250250 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.250292 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.251006 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8ea9bb8d6d2b455140d4d17b9b3ddbc16caa6ff50e9a5f66da80be0038f97979"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.251068 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://8ea9bb8d6d2b455140d4d17b9b3ddbc16caa6ff50e9a5f66da80be0038f97979" gracePeriod=600 Jan 20 20:05:20 crc kubenswrapper[4948]: E0120 20:05:20.646922 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Jan 20 20:05:20 crc kubenswrapper[4948]: E0120 20:05:20.647413 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-57xcx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-fdwn2_openstack(d96cb8cd-dfa3-4d70-af44-be9627945b5f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:05:20 crc kubenswrapper[4948]: E0120 20:05:20.648877 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-fdwn2" podUID="d96cb8cd-dfa3-4d70-af44-be9627945b5f" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.763011 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.773734 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.941396 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-etc-swift\") pod \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.941467 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-additional-scripts\") pod \"16ff4b98-5002-4a48-9e41-8081b830c8eb\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.941506 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run\") pod \"16ff4b98-5002-4a48-9e41-8081b830c8eb\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.941593 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run-ovn\") pod \"16ff4b98-5002-4a48-9e41-8081b830c8eb\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.941717 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-combined-ca-bundle\") pod \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.941749 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-dispersionconf\") pod \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.942137 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run" (OuterVolumeSpecName: "var-run") pod "16ff4b98-5002-4a48-9e41-8081b830c8eb" (UID: "16ff4b98-5002-4a48-9e41-8081b830c8eb"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.976177 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "16ff4b98-5002-4a48-9e41-8081b830c8eb" (UID: "16ff4b98-5002-4a48-9e41-8081b830c8eb"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.977028 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" (UID: "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.979581 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "16ff4b98-5002-4a48-9e41-8081b830c8eb" (UID: "16ff4b98-5002-4a48-9e41-8081b830c8eb"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.979607 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" (UID: "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.941837 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-ring-data-devices\") pod \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.979697 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-scripts\") pod \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.979765 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4gg2\" (UniqueName: \"kubernetes.io/projected/16ff4b98-5002-4a48-9e41-8081b830c8eb-kube-api-access-k4gg2\") pod \"16ff4b98-5002-4a48-9e41-8081b830c8eb\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.979815 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6swn\" (UniqueName: \"kubernetes.io/projected/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-kube-api-access-n6swn\") pod \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.979849 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-scripts\") pod \"16ff4b98-5002-4a48-9e41-8081b830c8eb\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.979866 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-log-ovn\") pod \"16ff4b98-5002-4a48-9e41-8081b830c8eb\" (UID: \"16ff4b98-5002-4a48-9e41-8081b830c8eb\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.979896 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-swiftconf\") pod \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\" (UID: \"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845\") " Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.981332 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-scripts" (OuterVolumeSpecName: "scripts") pod "16ff4b98-5002-4a48-9e41-8081b830c8eb" (UID: "16ff4b98-5002-4a48-9e41-8081b830c8eb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.981372 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "16ff4b98-5002-4a48-9e41-8081b830c8eb" (UID: "16ff4b98-5002-4a48-9e41-8081b830c8eb"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.982483 4948 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.982508 4948 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.982529 4948 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.982538 4948 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.982650 4948 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.982661 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/16ff4b98-5002-4a48-9e41-8081b830c8eb-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:20 crc kubenswrapper[4948]: I0120 20:05:20.983932 4948 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/16ff4b98-5002-4a48-9e41-8081b830c8eb-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.001493 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16ff4b98-5002-4a48-9e41-8081b830c8eb-kube-api-access-k4gg2" (OuterVolumeSpecName: "kube-api-access-k4gg2") pod "16ff4b98-5002-4a48-9e41-8081b830c8eb" (UID: "16ff4b98-5002-4a48-9e41-8081b830c8eb"). InnerVolumeSpecName "kube-api-access-k4gg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.001814 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-kube-api-access-n6swn" (OuterVolumeSpecName: "kube-api-access-n6swn") pod "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" (UID: "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845"). InnerVolumeSpecName "kube-api-access-n6swn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.002933 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" (UID: "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.030691 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.050172 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" (UID: "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.052178 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-scripts" (OuterVolumeSpecName: "scripts") pod "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" (UID: "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.052432 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" (UID: "ce6ef66a-e0b9-4dbf-9c1b-262e952e9845"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.084615 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-utilities\") pod \"8332c140-d061-47f6-b309-973a562bccc6\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.084811 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqljj\" (UniqueName: \"kubernetes.io/projected/8332c140-d061-47f6-b309-973a562bccc6-kube-api-access-zqljj\") pod \"8332c140-d061-47f6-b309-973a562bccc6\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.084848 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-catalog-content\") pod \"8332c140-d061-47f6-b309-973a562bccc6\" (UID: \"8332c140-d061-47f6-b309-973a562bccc6\") " Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.085195 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.085209 4948 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.085218 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.085228 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4gg2\" (UniqueName: \"kubernetes.io/projected/16ff4b98-5002-4a48-9e41-8081b830c8eb-kube-api-access-k4gg2\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.085239 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6swn\" (UniqueName: \"kubernetes.io/projected/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-kube-api-access-n6swn\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.085254 4948 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ce6ef66a-e0b9-4dbf-9c1b-262e952e9845-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.085603 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-utilities" (OuterVolumeSpecName: "utilities") pod "8332c140-d061-47f6-b309-973a562bccc6" (UID: "8332c140-d061-47f6-b309-973a562bccc6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.088299 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8332c140-d061-47f6-b309-973a562bccc6-kube-api-access-zqljj" (OuterVolumeSpecName: "kube-api-access-zqljj") pod "8332c140-d061-47f6-b309-973a562bccc6" (UID: "8332c140-d061-47f6-b309-973a562bccc6"). 
InnerVolumeSpecName "kube-api-access-zqljj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.137438 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8332c140-d061-47f6-b309-973a562bccc6" (UID: "8332c140-d061-47f6-b309-973a562bccc6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.204515 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqljj\" (UniqueName: \"kubernetes.io/projected/8332c140-d061-47f6-b309-973a562bccc6-kube-api-access-zqljj\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.205174 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.205293 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8332c140-d061-47f6-b309-973a562bccc6-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.232767 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8njnt"] Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.247615 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-ctgvx" event={"ID":"ce6ef66a-e0b9-4dbf-9c1b-262e952e9845","Type":"ContainerDied","Data":"0f9e8af3f8cf01eb352886dfbc0173a52d81018d10c342fee20365367e8413c7"} Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.247672 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f9e8af3f8cf01eb352886dfbc0173a52d81018d10c342fee20365367e8413c7" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.247779 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-ctgvx" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.253982 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xlcmv" event={"ID":"8332c140-d061-47f6-b309-973a562bccc6","Type":"ContainerDied","Data":"adc48e0aac3aaa9f5430ca70ea00ca35266e502b97acd4f84031820abaa83414"} Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.254040 4948 scope.go:117] "RemoveContainer" containerID="8646db1d9698dcd48767d21f65a6826c62e5129b7bd821f15967d3329288d0a3" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.254179 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xlcmv" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.266653 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-hpg27-config-l26bm" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.267543 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27-config-l26bm" event={"ID":"16ff4b98-5002-4a48-9e41-8081b830c8eb","Type":"ContainerDied","Data":"81df4da363f1aa4e90c78782f6f0b30140e2101f77b7fea0d6d916c21bbe1dd8"} Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.267593 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="81df4da363f1aa4e90c78782f6f0b30140e2101f77b7fea0d6d916c21bbe1dd8" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.269837 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8njnt" event={"ID":"24ac2816-d915-48c3-b75a-3f866aa46a43","Type":"ContainerStarted","Data":"ea51b5ad137b44712b408cbd575f06bd9ba0230dceee486be5e47a4f5f471633"} Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.274210 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="8ea9bb8d6d2b455140d4d17b9b3ddbc16caa6ff50e9a5f66da80be0038f97979" exitCode=0 Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.275193 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"8ea9bb8d6d2b455140d4d17b9b3ddbc16caa6ff50e9a5f66da80be0038f97979"} Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.275240 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"a26c04565cc618f3f275d4a90dd01432ac1f9fe490efd0919ef900cbd2cc4e1c"} Jan 20 20:05:21 crc kubenswrapper[4948]: E0120 20:05:21.279583 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-fdwn2" podUID="d96cb8cd-dfa3-4d70-af44-be9627945b5f" Jan 20 20:05:21 crc kubenswrapper[4948]: W0120 20:05:21.287352 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeba0bf3a_2428_41df_a1b2_bdfd93056ff4.slice/crio-f4e4fb748be661b34bc14379f6883873caa6471a04171b97c671dead20c72d36 WatchSource:0}: Error finding container f4e4fb748be661b34bc14379f6883873caa6471a04171b97c671dead20c72d36: Status 404 returned error can't find the container with id f4e4fb748be661b34bc14379f6883873caa6471a04171b97c671dead20c72d36 Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.305750 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jmpg6"] Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.307248 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.312001 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/projected/253a8193-904e-4f62-adbe-597b97b4fd30-etc-swift\") pod \"swift-storage-0\" (UID: \"253a8193-904e-4f62-adbe-597b97b4fd30\") " pod="openstack/swift-storage-0" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.313111 4948 scope.go:117] "RemoveContainer" containerID="392f0628c298ef4a754588adaef8611274577fc86cd7bd7cd091a9a27105b1cb" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.365887 4948 scope.go:117] "RemoveContainer" containerID="254a7a439497af193dd6aace560c84bbeaaf2d924a9cd29abf9ff5dc361d2732" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.376293 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xlcmv"] Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.389967 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xlcmv"] Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.434026 4948 scope.go:117] "RemoveContainer" containerID="d62e03ef00dbbeb77df97565ffab795a12284dfbc62cb77594b2a0a88f280a6c" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.581341 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.928339 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-hpg27-config-l26bm"] Jan 20 20:05:21 crc kubenswrapper[4948]: I0120 20:05:21.949764 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-hpg27-config-l26bm"] Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.013210 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-hpg27-config-4gxkt"] Jan 20 20:05:22 crc kubenswrapper[4948]: E0120 20:05:22.013654 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8332c140-d061-47f6-b309-973a562bccc6" containerName="extract-utilities" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.013678 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8332c140-d061-47f6-b309-973a562bccc6" containerName="extract-utilities" Jan 20 20:05:22 crc kubenswrapper[4948]: E0120 20:05:22.013713 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" containerName="swift-ring-rebalance" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.013723 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" containerName="swift-ring-rebalance" Jan 20 20:05:22 crc kubenswrapper[4948]: E0120 20:05:22.013742 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16ff4b98-5002-4a48-9e41-8081b830c8eb" containerName="ovn-config" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.013750 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="16ff4b98-5002-4a48-9e41-8081b830c8eb" containerName="ovn-config" Jan 20 20:05:22 crc kubenswrapper[4948]: E0120 20:05:22.013762 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8332c140-d061-47f6-b309-973a562bccc6" containerName="registry-server" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.013772 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8332c140-d061-47f6-b309-973a562bccc6" containerName="registry-server" Jan 20 20:05:22 crc kubenswrapper[4948]: E0120 20:05:22.013791 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8332c140-d061-47f6-b309-973a562bccc6" containerName="extract-content" Jan 20 20:05:22 
crc kubenswrapper[4948]: I0120 20:05:22.013798 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8332c140-d061-47f6-b309-973a562bccc6" containerName="extract-content" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.014032 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="8332c140-d061-47f6-b309-973a562bccc6" containerName="registry-server" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.014054 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce6ef66a-e0b9-4dbf-9c1b-262e952e9845" containerName="swift-ring-rebalance" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.014067 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="16ff4b98-5002-4a48-9e41-8081b830c8eb" containerName="ovn-config" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.014843 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.020673 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.045616 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hpg27-config-4gxkt"] Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.120755 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.120836 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-additional-scripts\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.120882 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-scripts\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.120905 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22ccw\" (UniqueName: \"kubernetes.io/projected/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-kube-api-access-22ccw\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.121073 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-log-ovn\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.121165 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run-ovn\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.132414 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.224157 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run-ovn\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.224631 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-additional-scripts\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.224673 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.224763 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-scripts\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.224791 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22ccw\" (UniqueName: \"kubernetes.io/projected/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-kube-api-access-22ccw\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.224872 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-log-ovn\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.226658 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run-ovn\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.228016 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-additional-scripts\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " 
pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.228260 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.235362 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-log-ovn\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.243392 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-scripts\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: W0120 20:05:22.288368 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod253a8193_904e_4f62_adbe_597b97b4fd30.slice/crio-7546f7e8b74298a8667009f40591597fa4c311a63a8075d4974ff3deb98f89d0 WatchSource:0}: Error finding container 7546f7e8b74298a8667009f40591597fa4c311a63a8075d4974ff3deb98f89d0: Status 404 returned error can't find the container with id 7546f7e8b74298a8667009f40591597fa4c311a63a8075d4974ff3deb98f89d0 Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.290948 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22ccw\" (UniqueName: \"kubernetes.io/projected/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-kube-api-access-22ccw\") pod \"ovn-controller-hpg27-config-4gxkt\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") " pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.297464 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.299764 4948 generic.go:334] "Generic (PLEG): container finished" podID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerID="cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560" exitCode=0 Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.299844 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmpg6" event={"ID":"eba0bf3a-2428-41df-a1b2-bdfd93056ff4","Type":"ContainerDied","Data":"cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560"} Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.299873 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmpg6" event={"ID":"eba0bf3a-2428-41df-a1b2-bdfd93056ff4","Type":"ContainerStarted","Data":"f4e4fb748be661b34bc14379f6883873caa6471a04171b97c671dead20c72d36"} Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.332291 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-hpg27-config-4gxkt" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.336062 4948 generic.go:334] "Generic (PLEG): container finished" podID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerID="7c8e3bbb2b8de0291a990aebc3feba86bc46aad3f89c3dda453e7518c5b18980" exitCode=0 Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.336149 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8njnt" event={"ID":"24ac2816-d915-48c3-b75a-3f866aa46a43","Type":"ContainerDied","Data":"7c8e3bbb2b8de0291a990aebc3feba86bc46aad3f89c3dda453e7518c5b18980"} Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.585299 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16ff4b98-5002-4a48-9e41-8081b830c8eb" path="/var/lib/kubelet/pods/16ff4b98-5002-4a48-9e41-8081b830c8eb/volumes" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.587525 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8332c140-d061-47f6-b309-973a562bccc6" path="/var/lib/kubelet/pods/8332c140-d061-47f6-b309-973a562bccc6/volumes" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.797017 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.917508 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-qnfsz"] Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.924812 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-qnfsz" Jan 20 20:05:22 crc kubenswrapper[4948]: I0120 20:05:22.995028 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-qnfsz"] Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.087018 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bdns\" (UniqueName: \"kubernetes.io/projected/19434efc-51da-454c-a87d-91bd70e97ad1-kube-api-access-8bdns\") pod \"cinder-db-create-qnfsz\" (UID: \"19434efc-51da-454c-a87d-91bd70e97ad1\") " pod="openstack/cinder-db-create-qnfsz" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.087118 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19434efc-51da-454c-a87d-91bd70e97ad1-operator-scripts\") pod \"cinder-db-create-qnfsz\" (UID: \"19434efc-51da-454c-a87d-91bd70e97ad1\") " pod="openstack/cinder-db-create-qnfsz" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.146099 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-ctqgn"] Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.147378 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-ctqgn" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.183515 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-ctqgn"] Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.189339 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bdns\" (UniqueName: \"kubernetes.io/projected/19434efc-51da-454c-a87d-91bd70e97ad1-kube-api-access-8bdns\") pod \"cinder-db-create-qnfsz\" (UID: \"19434efc-51da-454c-a87d-91bd70e97ad1\") " pod="openstack/cinder-db-create-qnfsz" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.189430 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19434efc-51da-454c-a87d-91bd70e97ad1-operator-scripts\") pod \"cinder-db-create-qnfsz\" (UID: \"19434efc-51da-454c-a87d-91bd70e97ad1\") " pod="openstack/cinder-db-create-qnfsz" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.189476 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-operator-scripts\") pod \"barbican-db-create-ctqgn\" (UID: \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\") " pod="openstack/barbican-db-create-ctqgn" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.189502 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7dvv\" (UniqueName: \"kubernetes.io/projected/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-kube-api-access-s7dvv\") pod \"barbican-db-create-ctqgn\" (UID: \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\") " pod="openstack/barbican-db-create-ctqgn" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.195332 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19434efc-51da-454c-a87d-91bd70e97ad1-operator-scripts\") pod \"cinder-db-create-qnfsz\" (UID: \"19434efc-51da-454c-a87d-91bd70e97ad1\") " pod="openstack/cinder-db-create-qnfsz" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.343505 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-operator-scripts\") pod \"barbican-db-create-ctqgn\" (UID: \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\") " pod="openstack/barbican-db-create-ctqgn" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.345602 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-operator-scripts\") pod \"barbican-db-create-ctqgn\" (UID: \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\") " pod="openstack/barbican-db-create-ctqgn" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.345678 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7dvv\" (UniqueName: \"kubernetes.io/projected/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-kube-api-access-s7dvv\") pod \"barbican-db-create-ctqgn\" (UID: \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\") " pod="openstack/barbican-db-create-ctqgn" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.411610 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bdns\" (UniqueName: 
\"kubernetes.io/projected/19434efc-51da-454c-a87d-91bd70e97ad1-kube-api-access-8bdns\") pod \"cinder-db-create-qnfsz\" (UID: \"19434efc-51da-454c-a87d-91bd70e97ad1\") " pod="openstack/cinder-db-create-qnfsz" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.454453 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"7546f7e8b74298a8667009f40591597fa4c311a63a8075d4974ff3deb98f89d0"} Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.471881 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-16db-account-create-update-d7lmx"] Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.473076 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-16db-account-create-update-d7lmx" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.477855 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.493372 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7dvv\" (UniqueName: \"kubernetes.io/projected/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-kube-api-access-s7dvv\") pod \"barbican-db-create-ctqgn\" (UID: \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\") " pod="openstack/barbican-db-create-ctqgn" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.531604 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-16db-account-create-update-d7lmx"] Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.550061 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a2522fe2-db81-4fae-abeb-e99db7690237-operator-scripts\") pod \"barbican-16db-account-create-update-d7lmx\" (UID: \"a2522fe2-db81-4fae-abeb-e99db7690237\") " pod="openstack/barbican-16db-account-create-update-d7lmx" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.550151 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwl7n\" (UniqueName: \"kubernetes.io/projected/a2522fe2-db81-4fae-abeb-e99db7690237-kube-api-access-zwl7n\") pod \"barbican-16db-account-create-update-d7lmx\" (UID: \"a2522fe2-db81-4fae-abeb-e99db7690237\") " pod="openstack/barbican-16db-account-create-update-d7lmx" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.622196 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-qnfsz" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.654974 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a2522fe2-db81-4fae-abeb-e99db7690237-operator-scripts\") pod \"barbican-16db-account-create-update-d7lmx\" (UID: \"a2522fe2-db81-4fae-abeb-e99db7690237\") " pod="openstack/barbican-16db-account-create-update-d7lmx" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.655105 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwl7n\" (UniqueName: \"kubernetes.io/projected/a2522fe2-db81-4fae-abeb-e99db7690237-kube-api-access-zwl7n\") pod \"barbican-16db-account-create-update-d7lmx\" (UID: \"a2522fe2-db81-4fae-abeb-e99db7690237\") " pod="openstack/barbican-16db-account-create-update-d7lmx" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.656914 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a2522fe2-db81-4fae-abeb-e99db7690237-operator-scripts\") pod \"barbican-16db-account-create-update-d7lmx\" (UID: \"a2522fe2-db81-4fae-abeb-e99db7690237\") " pod="openstack/barbican-16db-account-create-update-d7lmx" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.746663 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwl7n\" (UniqueName: \"kubernetes.io/projected/a2522fe2-db81-4fae-abeb-e99db7690237-kube-api-access-zwl7n\") pod \"barbican-16db-account-create-update-d7lmx\" (UID: \"a2522fe2-db81-4fae-abeb-e99db7690237\") " pod="openstack/barbican-16db-account-create-update-d7lmx" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.769445 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ctqgn" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.848389 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-16db-account-create-update-d7lmx" Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.856792 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hpg27-config-4gxkt"] Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.889845 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-5116-account-create-update-6hrrc"] Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.891027 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.906299 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.934444 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5116-account-create-update-6hrrc"]
Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.967931 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01681e12-ad87-49f8-8f36-0631b107e19d-operator-scripts\") pod \"cinder-5116-account-create-update-6hrrc\" (UID: \"01681e12-ad87-49f8-8f36-0631b107e19d\") " pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:23 crc kubenswrapper[4948]: I0120 20:05:23.968434 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8l8kc\" (UniqueName: \"kubernetes.io/projected/01681e12-ad87-49f8-8f36-0631b107e19d-kube-api-access-8l8kc\") pod \"cinder-5116-account-create-update-6hrrc\" (UID: \"01681e12-ad87-49f8-8f36-0631b107e19d\") " pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.070525 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8l8kc\" (UniqueName: \"kubernetes.io/projected/01681e12-ad87-49f8-8f36-0631b107e19d-kube-api-access-8l8kc\") pod \"cinder-5116-account-create-update-6hrrc\" (UID: \"01681e12-ad87-49f8-8f36-0631b107e19d\") " pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.070609 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01681e12-ad87-49f8-8f36-0631b107e19d-operator-scripts\") pod \"cinder-5116-account-create-update-6hrrc\" (UID: \"01681e12-ad87-49f8-8f36-0631b107e19d\") " pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.072160 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01681e12-ad87-49f8-8f36-0631b107e19d-operator-scripts\") pod \"cinder-5116-account-create-update-6hrrc\" (UID: \"01681e12-ad87-49f8-8f36-0631b107e19d\") " pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.079777 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-7x47d"]
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.081147 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.094852 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-7x47d"]
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.125975 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-cc7hs"]
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.127074 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.134240 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.134342 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.142942 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.143125 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9zfkq"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.144966 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8l8kc\" (UniqueName: \"kubernetes.io/projected/01681e12-ad87-49f8-8f36-0631b107e19d-kube-api-access-8l8kc\") pod \"cinder-5116-account-create-update-6hrrc\" (UID: \"01681e12-ad87-49f8-8f36-0631b107e19d\") " pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.173038 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-combined-ca-bundle\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.173140 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-config-data\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.173220 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-operator-scripts\") pod \"neutron-db-create-7x47d\" (UID: \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\") " pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.173245 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zccb4\" (UniqueName: \"kubernetes.io/projected/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-kube-api-access-zccb4\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.173282 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqz57\" (UniqueName: \"kubernetes.io/projected/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-kube-api-access-xqz57\") pod \"neutron-db-create-7x47d\" (UID: \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\") " pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.181617 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-cc7hs"]
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.238845 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.249733 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-0912-account-create-update-r5z5f"]
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.258786 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.277579 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.278308 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-operator-scripts\") pod \"neutron-db-create-7x47d\" (UID: \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\") " pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.278372 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zccb4\" (UniqueName: \"kubernetes.io/projected/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-kube-api-access-zccb4\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.278421 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqz57\" (UniqueName: \"kubernetes.io/projected/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-kube-api-access-xqz57\") pod \"neutron-db-create-7x47d\" (UID: \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\") " pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.278442 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-combined-ca-bundle\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.278526 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-config-data\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.280165 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-operator-scripts\") pod \"neutron-db-create-7x47d\" (UID: \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\") " pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.284732 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0912-account-create-update-r5z5f"]
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.288535 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-combined-ca-bundle\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.298105 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-config-data\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.345349 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqz57\" (UniqueName: \"kubernetes.io/projected/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-kube-api-access-xqz57\") pod \"neutron-db-create-7x47d\" (UID: \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\") " pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.383640 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcm5l\" (UniqueName: \"kubernetes.io/projected/8665723e-3db4-4331-892a-015554f4c300-kube-api-access-jcm5l\") pod \"neutron-0912-account-create-update-r5z5f\" (UID: \"8665723e-3db4-4331-892a-015554f4c300\") " pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.383894 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8665723e-3db4-4331-892a-015554f4c300-operator-scripts\") pod \"neutron-0912-account-create-update-r5z5f\" (UID: \"8665723e-3db4-4331-892a-015554f4c300\") " pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.394743 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zccb4\" (UniqueName: \"kubernetes.io/projected/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-kube-api-access-zccb4\") pod \"keystone-db-sync-cc7hs\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.411407 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.447886 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-cc7hs"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.484742 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmpg6" event={"ID":"eba0bf3a-2428-41df-a1b2-bdfd93056ff4","Type":"ContainerStarted","Data":"c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c"}
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.484983 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8665723e-3db4-4331-892a-015554f4c300-operator-scripts\") pod \"neutron-0912-account-create-update-r5z5f\" (UID: \"8665723e-3db4-4331-892a-015554f4c300\") " pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.485249 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcm5l\" (UniqueName: \"kubernetes.io/projected/8665723e-3db4-4331-892a-015554f4c300-kube-api-access-jcm5l\") pod \"neutron-0912-account-create-update-r5z5f\" (UID: \"8665723e-3db4-4331-892a-015554f4c300\") " pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.486395 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8665723e-3db4-4331-892a-015554f4c300-operator-scripts\") pod \"neutron-0912-account-create-update-r5z5f\" (UID: \"8665723e-3db4-4331-892a-015554f4c300\") " pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.498491 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27-config-4gxkt" event={"ID":"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40","Type":"ContainerStarted","Data":"76057af34d91d85f774c1c07ecd9437ec9b1f509c2d2ea7092c961dc809e291a"}
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.539778 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcm5l\" (UniqueName: \"kubernetes.io/projected/8665723e-3db4-4331-892a-015554f4c300-kube-api-access-jcm5l\") pod \"neutron-0912-account-create-update-r5z5f\" (UID: \"8665723e-3db4-4331-892a-015554f4c300\") " pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.611523 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.855778 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-qnfsz"]
Jan 20 20:05:24 crc kubenswrapper[4948]: I0120 20:05:24.964617 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-ctqgn"]
Jan 20 20:05:25 crc kubenswrapper[4948]: I0120 20:05:25.574841 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-qnfsz" event={"ID":"19434efc-51da-454c-a87d-91bd70e97ad1","Type":"ContainerStarted","Data":"7056ca93f22700c9f97621086f6784b918e2720e7a9002ac22dc6bdee2e4e7d2"}
Jan 20 20:05:25 crc kubenswrapper[4948]: I0120 20:05:25.584468 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27-config-4gxkt" event={"ID":"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40","Type":"ContainerStarted","Data":"f487e4e91ecaa0711310c8e0b7acc4cff2d35e96dd3ae6fa1f545418d6f523a9"}
Jan 20 20:05:25 crc kubenswrapper[4948]: I0120 20:05:25.590075 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ctqgn" event={"ID":"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759","Type":"ContainerStarted","Data":"c62d0c729ef35e3eba95c7583fe5a5829b76fff8a6b38643f0c2241c8d164bea"}
Jan 20 20:05:25 crc kubenswrapper[4948]: I0120 20:05:25.615172 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-hpg27-config-4gxkt" podStartSLOduration=4.615144051 podStartE2EDuration="4.615144051s" podCreationTimestamp="2026-01-20 20:05:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:25.615144901 +0000 UTC m=+953.565869860" watchObservedRunningTime="2026-01-20 20:05:25.615144051 +0000 UTC m=+953.565869010"
Jan 20 20:05:26 crc kubenswrapper[4948]: I0120 20:05:26.634466 4948 generic.go:334] "Generic (PLEG): container finished" podID="4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" containerID="f487e4e91ecaa0711310c8e0b7acc4cff2d35e96dd3ae6fa1f545418d6f523a9" exitCode=0
Jan 20 20:05:26 crc kubenswrapper[4948]: I0120 20:05:26.634865 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27-config-4gxkt" event={"ID":"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40","Type":"ContainerDied","Data":"f487e4e91ecaa0711310c8e0b7acc4cff2d35e96dd3ae6fa1f545418d6f523a9"}
Jan 20 20:05:26 crc kubenswrapper[4948]: I0120 20:05:26.874073 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-5116-account-create-update-6hrrc"]
Jan 20 20:05:26 crc kubenswrapper[4948]: I0120 20:05:26.897029 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-16db-account-create-update-d7lmx"]
Jan 20 20:05:26 crc kubenswrapper[4948]: W0120 20:05:26.897321 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01681e12_ad87_49f8_8f36_0631b107e19d.slice/crio-54011166c361352066a19fff377d722340636188cc7c2103ec1503e4b88a849b WatchSource:0}: Error finding container 54011166c361352066a19fff377d722340636188cc7c2103ec1503e4b88a849b: Status 404 returned error can't find the container with id 54011166c361352066a19fff377d722340636188cc7c2103ec1503e4b88a849b
Jan 20 20:05:26 crc kubenswrapper[4948]: W0120 20:05:26.909931 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2522fe2_db81_4fae_abeb_e99db7690237.slice/crio-b4452d9c8b940cd63de574df21b3866d5368fc2c5e5da9fa08a1fd3f1638dc12 WatchSource:0}: Error finding container b4452d9c8b940cd63de574df21b3866d5368fc2c5e5da9fa08a1fd3f1638dc12: Status 404 returned error can't find the container with id b4452d9c8b940cd63de574df21b3866d5368fc2c5e5da9fa08a1fd3f1638dc12
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.332221 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0912-account-create-update-r5z5f"]
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.494350 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-cc7hs"]
Jan 20 20:05:27 crc kubenswrapper[4948]: W0120 20:05:27.579109 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8dd9b1bc_11ee_4556_8c6a_699196c19ec1.slice/crio-f836bda370cc551faa1f5e836cf8c005c60af1a012cc7155cd97ba9d99ecf70b WatchSource:0}: Error finding container f836bda370cc551faa1f5e836cf8c005c60af1a012cc7155cd97ba9d99ecf70b: Status 404 returned error can't find the container with id f836bda370cc551faa1f5e836cf8c005c60af1a012cc7155cd97ba9d99ecf70b
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.603835 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-7x47d"]
Jan 20 20:05:27 crc kubenswrapper[4948]: W0120 20:05:27.619103 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2cf4ce2_6783_421e_9ca3_2bb938815f2f.slice/crio-360a2e5820d056783ed1bc6c644fc5aefca138cf9597c85e0e72ba1c386f805b WatchSource:0}: Error finding container 360a2e5820d056783ed1bc6c644fc5aefca138cf9597c85e0e72ba1c386f805b: Status 404 returned error can't find the container with id 360a2e5820d056783ed1bc6c644fc5aefca138cf9597c85e0e72ba1c386f805b
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.644038 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cc7hs" event={"ID":"8dd9b1bc-11ee-4556-8c6a-699196c19ec1","Type":"ContainerStarted","Data":"f836bda370cc551faa1f5e836cf8c005c60af1a012cc7155cd97ba9d99ecf70b"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.651472 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5116-account-create-update-6hrrc" event={"ID":"01681e12-ad87-49f8-8f36-0631b107e19d","Type":"ContainerStarted","Data":"54011166c361352066a19fff377d722340636188cc7c2103ec1503e4b88a849b"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.653667 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-7x47d" event={"ID":"d2cf4ce2-6783-421e-9ca3-2bb938815f2f","Type":"ContainerStarted","Data":"360a2e5820d056783ed1bc6c644fc5aefca138cf9597c85e0e72ba1c386f805b"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.656535 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ctqgn" event={"ID":"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759","Type":"ContainerStarted","Data":"defc9602a3aec24af7b0bcc94383737cda733142f7764368bf590714f79cbedc"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.661481 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0912-account-create-update-r5z5f" event={"ID":"8665723e-3db4-4331-892a-015554f4c300","Type":"ContainerStarted","Data":"0ca12fc1010b6140fac61724a0995803f1771b86040656f4139e80d940182a06"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.663425 4948 generic.go:334] "Generic (PLEG): container finished" podID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerID="c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c" exitCode=0
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.663482 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmpg6" event={"ID":"eba0bf3a-2428-41df-a1b2-bdfd93056ff4","Type":"ContainerDied","Data":"c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.666021 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-16db-account-create-update-d7lmx" event={"ID":"a2522fe2-db81-4fae-abeb-e99db7690237","Type":"ContainerStarted","Data":"b4452d9c8b940cd63de574df21b3866d5368fc2c5e5da9fa08a1fd3f1638dc12"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.677522 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8njnt" event={"ID":"24ac2816-d915-48c3-b75a-3f866aa46a43","Type":"ContainerStarted","Data":"8d6c7feb57504becceb7771eaf561c74bbe33a92945791a56c201dc290915db7"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.781283 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-qnfsz" event={"ID":"19434efc-51da-454c-a87d-91bd70e97ad1","Type":"ContainerStarted","Data":"c83e0f39d777297f6e3dc2807a8e05b369b1f4126665bed3026397f23c7a7066"}
Jan 20 20:05:27 crc kubenswrapper[4948]: I0120 20:05:27.808924 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-ctqgn" podStartSLOduration=4.808893276 podStartE2EDuration="4.808893276s" podCreationTimestamp="2026-01-20 20:05:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:27.793482019 +0000 UTC m=+955.744206988" watchObservedRunningTime="2026-01-20 20:05:27.808893276 +0000 UTC m=+955.759618245"
Jan 20 20:05:28 crc kubenswrapper[4948]: I0120 20:05:28.057830 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-qnfsz" podStartSLOduration=6.057799694 podStartE2EDuration="6.057799694s" podCreationTimestamp="2026-01-20 20:05:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:28.030370537 +0000 UTC m=+955.981095516" watchObservedRunningTime="2026-01-20 20:05:28.057799694 +0000 UTC m=+956.008524663"
Jan 20 20:05:28 crc kubenswrapper[4948]: I0120 20:05:28.941461 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"644750abcdb216a7a48b5e82f7ea40c19650b5fa0b5f77fa1ef753bbd38c61dd"}
Jan 20 20:05:28 crc kubenswrapper[4948]: I0120 20:05:28.966372 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5116-account-create-update-6hrrc" event={"ID":"01681e12-ad87-49f8-8f36-0631b107e19d","Type":"ContainerStarted","Data":"87626e893ab3487cbc6ec1c93cab9ee8078a015e481b31a2490ac8a03a32bc24"}
Jan 20 20:05:28 crc kubenswrapper[4948]: I0120 20:05:28.982485 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-16db-account-create-update-d7lmx" event={"ID":"a2522fe2-db81-4fae-abeb-e99db7690237","Type":"ContainerStarted","Data":"3a3491925eceda3144c2222da6d443c7f8af4a54848aadc137f7c5ff19e4aa48"}
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.002340 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-5116-account-create-update-6hrrc" podStartSLOduration=6.002319907 podStartE2EDuration="6.002319907s" podCreationTimestamp="2026-01-20 20:05:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:28.996852992 +0000 UTC m=+956.947577961" watchObservedRunningTime="2026-01-20 20:05:29.002319907 +0000 UTC m=+956.953044876"
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.020820 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-16db-account-create-update-d7lmx" podStartSLOduration=6.020797581 podStartE2EDuration="6.020797581s" podCreationTimestamp="2026-01-20 20:05:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:29.016107328 +0000 UTC m=+956.966832297" watchObservedRunningTime="2026-01-20 20:05:29.020797581 +0000 UTC m=+956.971522550"
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.138441 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hpg27-config-4gxkt"
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.187109 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run-ovn\") pod \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") "
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.187505 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-log-ovn\") pod \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") "
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.187537 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22ccw\" (UniqueName: \"kubernetes.io/projected/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-kube-api-access-22ccw\") pod \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") "
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.187592 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-scripts\") pod \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") "
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.187619 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run\") pod \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") "
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.187667 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-additional-scripts\") pod \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\" (UID: \"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40\") "
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.188642 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" (UID: "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.191956 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" (UID: "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.192024 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" (UID: "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.192048 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run" (OuterVolumeSpecName: "var-run") pod "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" (UID: "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.210606 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-scripts" (OuterVolumeSpecName: "scripts") pod "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" (UID: "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.219944 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-kube-api-access-22ccw" (OuterVolumeSpecName: "kube-api-access-22ccw") pod "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" (UID: "4facbac8-bbd0-4d0b-83d9-bf2ce7834a40"). InnerVolumeSpecName "kube-api-access-22ccw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.292926 4948 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.292971 4948 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.292984 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22ccw\" (UniqueName: \"kubernetes.io/projected/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-kube-api-access-22ccw\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.292997 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.293008 4948 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-var-run\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.293021 4948 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40-additional-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.990917 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0912-account-create-update-r5z5f" event={"ID":"8665723e-3db4-4331-892a-015554f4c300","Type":"ContainerStarted","Data":"5a68b290623e7026f56160c6093714a427d69ef777dd603d05bfc4bbcc1a68ef"}
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.995114 4948 generic.go:334] "Generic (PLEG): container finished" podID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerID="8d6c7feb57504becceb7771eaf561c74bbe33a92945791a56c201dc290915db7" exitCode=0
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.995193 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8njnt" event={"ID":"24ac2816-d915-48c3-b75a-3f866aa46a43","Type":"ContainerDied","Data":"8d6c7feb57504becceb7771eaf561c74bbe33a92945791a56c201dc290915db7"}
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.997295 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hpg27-config-4gxkt" event={"ID":"4facbac8-bbd0-4d0b-83d9-bf2ce7834a40","Type":"ContainerDied","Data":"76057af34d91d85f774c1c07ecd9437ec9b1f509c2d2ea7092c961dc809e291a"}
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.997314 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hpg27-config-4gxkt"
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.997326 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76057af34d91d85f774c1c07ecd9437ec9b1f509c2d2ea7092c961dc809e291a"
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.999878 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"918ed87ecee678a01f7fad19f38046c8494449fa8a042bf9cd04955e699212da"}
Jan 20 20:05:29 crc kubenswrapper[4948]: I0120 20:05:29.999912 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"29ed79f95f4efbf2d5ad1937ffdb2a7fb679525b05461c7dbb94f4b8b466b6f0"}
Jan 20 20:05:30 crc kubenswrapper[4948]: I0120 20:05:30.001906 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-7x47d" event={"ID":"d2cf4ce2-6783-421e-9ca3-2bb938815f2f","Type":"ContainerStarted","Data":"5d56cd5f8c52843ec4d242cb094fb9fcd3e2b69ba20eedb713be72f2ea4d3d90"}
Jan 20 20:05:30 crc kubenswrapper[4948]: I0120 20:05:30.033076 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-0912-account-create-update-r5z5f" podStartSLOduration=6.033051755 podStartE2EDuration="6.033051755s" podCreationTimestamp="2026-01-20 20:05:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:30.024597555 +0000 UTC m=+957.975322524" watchObservedRunningTime="2026-01-20 20:05:30.033051755 +0000 UTC m=+957.983776724"
Jan 20 20:05:30 crc kubenswrapper[4948]: I0120 20:05:30.077346 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-7x47d" podStartSLOduration=6.0773231 podStartE2EDuration="6.0773231s" podCreationTimestamp="2026-01-20 20:05:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:30.07521283 +0000 UTC m=+958.025937799" watchObservedRunningTime="2026-01-20 20:05:30.0773231 +0000 UTC m=+958.028048069"
Jan 20 20:05:30 crc kubenswrapper[4948]: I0120 20:05:30.232998 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-hpg27-config-4gxkt"]
Jan 20 20:05:30 crc kubenswrapper[4948]: I0120 20:05:30.245131 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-hpg27-config-4gxkt"]
Jan 20 20:05:30 crc kubenswrapper[4948]: I0120 20:05:30.579610 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" path="/var/lib/kubelet/pods/4facbac8-bbd0-4d0b-83d9-bf2ce7834a40/volumes"
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.029315 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmpg6" event={"ID":"eba0bf3a-2428-41df-a1b2-bdfd93056ff4","Type":"ContainerStarted","Data":"63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.039073 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"30539e0034f0c83f1b6ce3c17e50de4b3a10c4b3a286fcdf0e88652d6a50b09f"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.040661 4948 generic.go:334] "Generic (PLEG): container finished" podID="01681e12-ad87-49f8-8f36-0631b107e19d" containerID="87626e893ab3487cbc6ec1c93cab9ee8078a015e481b31a2490ac8a03a32bc24" exitCode=0
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.040731 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5116-account-create-update-6hrrc" event={"ID":"01681e12-ad87-49f8-8f36-0631b107e19d","Type":"ContainerDied","Data":"87626e893ab3487cbc6ec1c93cab9ee8078a015e481b31a2490ac8a03a32bc24"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.042389 4948 generic.go:334] "Generic (PLEG): container finished" podID="d2cf4ce2-6783-421e-9ca3-2bb938815f2f" containerID="5d56cd5f8c52843ec4d242cb094fb9fcd3e2b69ba20eedb713be72f2ea4d3d90" exitCode=0
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.042459 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-7x47d" event={"ID":"d2cf4ce2-6783-421e-9ca3-2bb938815f2f","Type":"ContainerDied","Data":"5d56cd5f8c52843ec4d242cb094fb9fcd3e2b69ba20eedb713be72f2ea4d3d90"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.043961 4948 generic.go:334] "Generic (PLEG): container finished" podID="a2522fe2-db81-4fae-abeb-e99db7690237" containerID="3a3491925eceda3144c2222da6d443c7f8af4a54848aadc137f7c5ff19e4aa48" exitCode=0
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.044018 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-16db-account-create-update-d7lmx" event={"ID":"a2522fe2-db81-4fae-abeb-e99db7690237","Type":"ContainerDied","Data":"3a3491925eceda3144c2222da6d443c7f8af4a54848aadc137f7c5ff19e4aa48"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.047087 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jmpg6" podStartSLOduration=11.288190329 podStartE2EDuration="20.047075193s" podCreationTimestamp="2026-01-20 20:05:12 +0000 UTC" firstStartedPulling="2026-01-20 20:05:22.306157023 +0000 UTC m=+950.256881992" lastFinishedPulling="2026-01-20 20:05:31.065041897 +0000 UTC m=+959.015766856" observedRunningTime="2026-01-20 20:05:32.046278711 +0000 UTC m=+959.997003680" watchObservedRunningTime="2026-01-20 20:05:32.047075193 +0000 UTC m=+959.997800152"
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.047766 4948 generic.go:334] "Generic (PLEG): container finished" podID="19434efc-51da-454c-a87d-91bd70e97ad1" containerID="c83e0f39d777297f6e3dc2807a8e05b369b1f4126665bed3026397f23c7a7066" exitCode=0
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.047834 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-qnfsz" event={"ID":"19434efc-51da-454c-a87d-91bd70e97ad1","Type":"ContainerDied","Data":"c83e0f39d777297f6e3dc2807a8e05b369b1f4126665bed3026397f23c7a7066"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.049482 4948 generic.go:334] "Generic (PLEG): container finished" podID="5b8ef8bb-4baf-4b9e-b47f-e9b082d31759" containerID="defc9602a3aec24af7b0bcc94383737cda733142f7764368bf590714f79cbedc" exitCode=0
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.049520 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ctqgn" event={"ID":"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759","Type":"ContainerDied","Data":"defc9602a3aec24af7b0bcc94383737cda733142f7764368bf590714f79cbedc"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.058544 4948 generic.go:334] "Generic (PLEG): container finished" podID="8665723e-3db4-4331-892a-015554f4c300" containerID="5a68b290623e7026f56160c6093714a427d69ef777dd603d05bfc4bbcc1a68ef" exitCode=0
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.058640 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0912-account-create-update-r5z5f" event={"ID":"8665723e-3db4-4331-892a-015554f4c300","Type":"ContainerDied","Data":"5a68b290623e7026f56160c6093714a427d69ef777dd603d05bfc4bbcc1a68ef"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.062575 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8njnt" event={"ID":"24ac2816-d915-48c3-b75a-3f866aa46a43","Type":"ContainerStarted","Data":"23a254c510ad9724fbb174be37d080726f046614b0d6bab27ad7f7c41d29606f"}
Jan 20 20:05:32 crc kubenswrapper[4948]: I0120 20:05:32.213746 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8njnt" podStartSLOduration=6.006522882 podStartE2EDuration="15.213726019s" podCreationTimestamp="2026-01-20 20:05:17 +0000 UTC" firstStartedPulling="2026-01-20 20:05:22.347987229 +0000 UTC m=+950.298712198" lastFinishedPulling="2026-01-20 20:05:31.555190366 +0000 UTC m=+959.505915335" observedRunningTime="2026-01-20 20:05:32.196412658 +0000 UTC m=+960.147137637" watchObservedRunningTime="2026-01-20 20:05:32.213726019 +0000 UTC m=+960.164450988"
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.031501 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jmpg6"
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.031550 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jmpg6"
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.628570 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-16db-account-create-update-d7lmx"
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.729135 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a2522fe2-db81-4fae-abeb-e99db7690237-operator-scripts\") pod \"a2522fe2-db81-4fae-abeb-e99db7690237\" (UID: \"a2522fe2-db81-4fae-abeb-e99db7690237\") "
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.729291 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwl7n\" (UniqueName: \"kubernetes.io/projected/a2522fe2-db81-4fae-abeb-e99db7690237-kube-api-access-zwl7n\") pod \"a2522fe2-db81-4fae-abeb-e99db7690237\" (UID: \"a2522fe2-db81-4fae-abeb-e99db7690237\") "
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.730196 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2522fe2-db81-4fae-abeb-e99db7690237-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a2522fe2-db81-4fae-abeb-e99db7690237" (UID: "a2522fe2-db81-4fae-abeb-e99db7690237"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.736933 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2522fe2-db81-4fae-abeb-e99db7690237-kube-api-access-zwl7n" (OuterVolumeSpecName: "kube-api-access-zwl7n") pod "a2522fe2-db81-4fae-abeb-e99db7690237" (UID: "a2522fe2-db81-4fae-abeb-e99db7690237"). InnerVolumeSpecName "kube-api-access-zwl7n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.831206 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwl7n\" (UniqueName: \"kubernetes.io/projected/a2522fe2-db81-4fae-abeb-e99db7690237-kube-api-access-zwl7n\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.831461 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a2522fe2-db81-4fae-abeb-e99db7690237-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.896223 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-qnfsz"
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.897121 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ctqgn"
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.932979 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.933159 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7dvv\" (UniqueName: \"kubernetes.io/projected/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-kube-api-access-s7dvv\") pod \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\" (UID: \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\") "
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.933211 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-operator-scripts\") pod \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\" (UID: \"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759\") "
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.933255 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bdns\" (UniqueName: \"kubernetes.io/projected/19434efc-51da-454c-a87d-91bd70e97ad1-kube-api-access-8bdns\") pod \"19434efc-51da-454c-a87d-91bd70e97ad1\" (UID: \"19434efc-51da-454c-a87d-91bd70e97ad1\") "
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.933388 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19434efc-51da-454c-a87d-91bd70e97ad1-operator-scripts\") pod \"19434efc-51da-454c-a87d-91bd70e97ad1\" (UID: \"19434efc-51da-454c-a87d-91bd70e97ad1\") "
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.934310 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5b8ef8bb-4baf-4b9e-b47f-e9b082d31759" (UID: "5b8ef8bb-4baf-4b9e-b47f-e9b082d31759"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.934347 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19434efc-51da-454c-a87d-91bd70e97ad1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "19434efc-51da-454c-a87d-91bd70e97ad1" (UID: "19434efc-51da-454c-a87d-91bd70e97ad1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.939414 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19434efc-51da-454c-a87d-91bd70e97ad1-kube-api-access-8bdns" (OuterVolumeSpecName: "kube-api-access-8bdns") pod "19434efc-51da-454c-a87d-91bd70e97ad1" (UID: "19434efc-51da-454c-a87d-91bd70e97ad1"). InnerVolumeSpecName "kube-api-access-8bdns". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.945084 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-kube-api-access-s7dvv" (OuterVolumeSpecName: "kube-api-access-s7dvv") pod "5b8ef8bb-4baf-4b9e-b47f-e9b082d31759" (UID: "5b8ef8bb-4baf-4b9e-b47f-e9b082d31759"). InnerVolumeSpecName "kube-api-access-s7dvv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:05:33 crc kubenswrapper[4948]: I0120 20:05:33.995910 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.034530 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcm5l\" (UniqueName: \"kubernetes.io/projected/8665723e-3db4-4331-892a-015554f4c300-kube-api-access-jcm5l\") pod \"8665723e-3db4-4331-892a-015554f4c300\" (UID: \"8665723e-3db4-4331-892a-015554f4c300\") "
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.034608 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-operator-scripts\") pod \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\" (UID: \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\") "
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.034660 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xqz57\" (UniqueName: \"kubernetes.io/projected/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-kube-api-access-xqz57\") pod \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\" (UID: \"d2cf4ce2-6783-421e-9ca3-2bb938815f2f\") "
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.034734 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8665723e-3db4-4331-892a-015554f4c300-operator-scripts\") pod \"8665723e-3db4-4331-892a-015554f4c300\" (UID: \"8665723e-3db4-4331-892a-015554f4c300\") "
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.035062 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7dvv\" (UniqueName: \"kubernetes.io/projected/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-kube-api-access-s7dvv\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.035077 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.035087 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bdns\" (UniqueName: \"kubernetes.io/projected/19434efc-51da-454c-a87d-91bd70e97ad1-kube-api-access-8bdns\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.035096 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19434efc-51da-454c-a87d-91bd70e97ad1-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.035134 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d2cf4ce2-6783-421e-9ca3-2bb938815f2f" (UID: "d2cf4ce2-6783-421e-9ca3-2bb938815f2f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.035427 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8665723e-3db4-4331-892a-015554f4c300-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8665723e-3db4-4331-892a-015554f4c300" (UID: "8665723e-3db4-4331-892a-015554f4c300"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.035860 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.038237 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8665723e-3db4-4331-892a-015554f4c300-kube-api-access-jcm5l" (OuterVolumeSpecName: "kube-api-access-jcm5l") pod "8665723e-3db4-4331-892a-015554f4c300" (UID: "8665723e-3db4-4331-892a-015554f4c300"). InnerVolumeSpecName "kube-api-access-jcm5l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.050354 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-kube-api-access-xqz57" (OuterVolumeSpecName: "kube-api-access-xqz57") pod "d2cf4ce2-6783-421e-9ca3-2bb938815f2f" (UID: "d2cf4ce2-6783-421e-9ca3-2bb938815f2f"). InnerVolumeSpecName "kube-api-access-xqz57". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.133971 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-jmpg6" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="registry-server" probeResult="failure" output=<
Jan 20 20:05:34 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s
Jan 20 20:05:34 crc kubenswrapper[4948]: >
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.135358 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01681e12-ad87-49f8-8f36-0631b107e19d-operator-scripts\") pod \"01681e12-ad87-49f8-8f36-0631b107e19d\" (UID: \"01681e12-ad87-49f8-8f36-0631b107e19d\") "
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.135949 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01681e12-ad87-49f8-8f36-0631b107e19d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "01681e12-ad87-49f8-8f36-0631b107e19d" (UID: "01681e12-ad87-49f8-8f36-0631b107e19d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.136079 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8l8kc\" (UniqueName: \"kubernetes.io/projected/01681e12-ad87-49f8-8f36-0631b107e19d-kube-api-access-8l8kc\") pod \"01681e12-ad87-49f8-8f36-0631b107e19d\" (UID: \"01681e12-ad87-49f8-8f36-0631b107e19d\") "
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.136349 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.136360 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xqz57\" (UniqueName: \"kubernetes.io/projected/d2cf4ce2-6783-421e-9ca3-2bb938815f2f-kube-api-access-xqz57\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.136371 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01681e12-ad87-49f8-8f36-0631b107e19d-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.136381 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8665723e-3db4-4331-892a-015554f4c300-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.136390 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcm5l\" (UniqueName: \"kubernetes.io/projected/8665723e-3db4-4331-892a-015554f4c300-kube-api-access-jcm5l\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.138990 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"e0523a859eeefc119974008183940f1fcef1a6f3ed1d056e36a4a2eb301b4828"}
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.139031 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"736a2ad4973bc7a73c31c320b62c145007b724e2125713ac14bf8c3a57e3e012"}
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.142186 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01681e12-ad87-49f8-8f36-0631b107e19d-kube-api-access-8l8kc" (OuterVolumeSpecName: "kube-api-access-8l8kc") pod "01681e12-ad87-49f8-8f36-0631b107e19d" (UID: "01681e12-ad87-49f8-8f36-0631b107e19d"). InnerVolumeSpecName "kube-api-access-8l8kc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.150083 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-5116-account-create-update-6hrrc" event={"ID":"01681e12-ad87-49f8-8f36-0631b107e19d","Type":"ContainerDied","Data":"54011166c361352066a19fff377d722340636188cc7c2103ec1503e4b88a849b"}
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.150124 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54011166c361352066a19fff377d722340636188cc7c2103ec1503e4b88a849b"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.150179 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-5116-account-create-update-6hrrc"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.202080 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-7x47d" event={"ID":"d2cf4ce2-6783-421e-9ca3-2bb938815f2f","Type":"ContainerDied","Data":"360a2e5820d056783ed1bc6c644fc5aefca138cf9597c85e0e72ba1c386f805b"}
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.202131 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="360a2e5820d056783ed1bc6c644fc5aefca138cf9597c85e0e72ba1c386f805b"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.202242 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-7x47d"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.227592 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-ctqgn" event={"ID":"5b8ef8bb-4baf-4b9e-b47f-e9b082d31759","Type":"ContainerDied","Data":"c62d0c729ef35e3eba95c7583fe5a5829b76fff8a6b38643f0c2241c8d164bea"}
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.227640 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c62d0c729ef35e3eba95c7583fe5a5829b76fff8a6b38643f0c2241c8d164bea"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.227866 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-ctqgn"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.241672 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8l8kc\" (UniqueName: \"kubernetes.io/projected/01681e12-ad87-49f8-8f36-0631b107e19d-kube-api-access-8l8kc\") on node \"crc\" DevicePath \"\""
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.263541 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0912-account-create-update-r5z5f" event={"ID":"8665723e-3db4-4331-892a-015554f4c300","Type":"ContainerDied","Data":"0ca12fc1010b6140fac61724a0995803f1771b86040656f4139e80d940182a06"}
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.263588 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ca12fc1010b6140fac61724a0995803f1771b86040656f4139e80d940182a06"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.263648 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0912-account-create-update-r5z5f"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.274609 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-16db-account-create-update-d7lmx" event={"ID":"a2522fe2-db81-4fae-abeb-e99db7690237","Type":"ContainerDied","Data":"b4452d9c8b940cd63de574df21b3866d5368fc2c5e5da9fa08a1fd3f1638dc12"}
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.274665 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4452d9c8b940cd63de574df21b3866d5368fc2c5e5da9fa08a1fd3f1638dc12"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.274720 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-16db-account-create-update-d7lmx"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.282952 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-qnfsz" event={"ID":"19434efc-51da-454c-a87d-91bd70e97ad1","Type":"ContainerDied","Data":"7056ca93f22700c9f97621086f6784b918e2720e7a9002ac22dc6bdee2e4e7d2"}
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.283000 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7056ca93f22700c9f97621086f6784b918e2720e7a9002ac22dc6bdee2e4e7d2"
Jan 20 20:05:34 crc kubenswrapper[4948]: I0120 20:05:34.283068 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-qnfsz"
Jan 20 20:05:35 crc kubenswrapper[4948]: I0120 20:05:35.421941 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"1bf0b567eae32d289299af845a61f4a0bf91e6c11cc03648b699d51eaa9fd174"}
Jan 20 20:05:37 crc kubenswrapper[4948]: I0120 20:05:37.429634 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8njnt"
Jan 20 20:05:37 crc kubenswrapper[4948]: I0120 20:05:37.430249 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8njnt"
Jan 20 20:05:38 crc kubenswrapper[4948]: I0120 20:05:38.489666 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-8njnt" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="registry-server" probeResult="failure" output=<
Jan 20 20:05:38 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s
Jan 20 20:05:38 crc kubenswrapper[4948]: >
Jan 20 20:05:39 crc kubenswrapper[4948]: I0120 20:05:39.491849 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cc7hs" event={"ID":"8dd9b1bc-11ee-4556-8c6a-699196c19ec1","Type":"ContainerStarted","Data":"8333bb56024fda1ea6ab2ff9247306ba41ed96b6942899396893d6dba5549a97"}
Jan 20 20:05:39 crc kubenswrapper[4948]: I0120 20:05:39.523893 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"d077c21383f70ce9052c3eb345e346a76fb749d39c1e80e4c606c264fc5f5127"}
Jan 20 20:05:39 crc kubenswrapper[4948]: I0120 20:05:39.547978 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-cc7hs" podStartSLOduration=4.388974416 podStartE2EDuration="15.547949577s" podCreationTimestamp="2026-01-20 20:05:24 +0000 UTC" firstStartedPulling="2026-01-20 20:05:27.582189248 +0000 UTC m=+955.532914217" lastFinishedPulling="2026-01-20 20:05:38.741164419 +0000 UTC m=+966.691889378" observedRunningTime="2026-01-20 20:05:39.545316842 +0000 UTC m=+967.496041811" watchObservedRunningTime="2026-01-20 20:05:39.547949577 +0000 UTC m=+967.498674546"
Jan 20 20:05:40 crc kubenswrapper[4948]: I0120 20:05:40.536224 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-fdwn2" event={"ID":"d96cb8cd-dfa3-4d70-af44-be9627945b5f","Type":"ContainerStarted","Data":"5f03c6d62c705dccc787efee2f93f6e8d2b2f77510a812f0bc73e9f963f47546"}
Jan 20 20:05:40 crc kubenswrapper[4948]: I0120 20:05:40.563410 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-fdwn2" podStartSLOduration=5.880838067 podStartE2EDuration="44.56338506s" podCreationTimestamp="2026-01-20 20:04:56 +0000 UTC" firstStartedPulling="2026-01-20 20:05:00.044471115 +0000 UTC m=+927.995196084" lastFinishedPulling="2026-01-20 20:05:38.727018108 +0000 UTC m=+966.677743077" observedRunningTime="2026-01-20 20:05:40.560438727 +0000 UTC m=+968.511163696" watchObservedRunningTime="2026-01-20 20:05:40.56338506 +0000 UTC m=+968.514110029"
Jan 20 20:05:41 crc kubenswrapper[4948]: I0120 20:05:41.551473 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"2455b44ec2551791791377ce6abc9401e6dc83645da4068250b9da0f7d5071ac"}
Jan 20 20:05:41 crc kubenswrapper[4948]: I0120 20:05:41.552849 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"b80586d54b5a12141f251856021f02535b894dcf7d5082142b965636013624af"}
Jan 20 20:05:41 crc kubenswrapper[4948]: I0120 20:05:41.552957 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"afc5e91aa9e0eae72bb9c90855d64409101b57d3f573c27d9c33dd09f3dc3d50"}
Jan 20 20:05:41 crc kubenswrapper[4948]: I0120 20:05:41.553409 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"132d644b38433e8b6ffeb11025c0c38483c161a66d3fee417c1e5d02a290651b"}
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.565588 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"afb4df8a52b6350e7c67647ba4a9d67226e48e170e93b593dca33e1b9a1ffa4a"}
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.565954 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"50a81a3f75a491eb237807738395f2c7a7eae33f2ae4dac7737f86122b3068dc"}
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.565965 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"253a8193-904e-4f62-adbe-597b97b4fd30","Type":"ContainerStarted","Data":"64d70bf7fed668346730392cb2d60cb03d357ecbdabb81ee1f087c29c0812a30"}
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.617870 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=36.405058978 podStartE2EDuration="54.617841446s" podCreationTimestamp="2026-01-20 20:04:48 +0000 UTC" firstStartedPulling="2026-01-20 20:05:22.294760249 +0000 UTC m=+950.245485218" lastFinishedPulling="2026-01-20 20:05:40.507542717 +0000 UTC m=+968.458267686" observedRunningTime="2026-01-20 20:05:42.614612504 +0000 UTC m=+970.565337473" watchObservedRunningTime="2026-01-20 20:05:42.617841446 +0000 UTC m=+970.568566415"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.959913 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-l799c"]
Jan 20 20:05:42 crc kubenswrapper[4948]: E0120 20:05:42.960379 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19434efc-51da-454c-a87d-91bd70e97ad1" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960403 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="19434efc-51da-454c-a87d-91bd70e97ad1" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: E0120 20:05:42.960419 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01681e12-ad87-49f8-8f36-0631b107e19d" containerName="mariadb-account-create-update"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960427 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="01681e12-ad87-49f8-8f36-0631b107e19d" containerName="mariadb-account-create-update"
Jan 20 20:05:42 crc kubenswrapper[4948]: E0120 20:05:42.960442 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2522fe2-db81-4fae-abeb-e99db7690237" containerName="mariadb-account-create-update"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960451 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2522fe2-db81-4fae-abeb-e99db7690237" containerName="mariadb-account-create-update"
Jan 20 20:05:42 crc kubenswrapper[4948]: E0120 20:05:42.960478 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8665723e-3db4-4331-892a-015554f4c300" containerName="mariadb-account-create-update"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960487 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8665723e-3db4-4331-892a-015554f4c300" containerName="mariadb-account-create-update"
Jan 20 20:05:42 crc kubenswrapper[4948]: E0120 20:05:42.960501 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2cf4ce2-6783-421e-9ca3-2bb938815f2f" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960509 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2cf4ce2-6783-421e-9ca3-2bb938815f2f" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: E0120 20:05:42.960528 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" containerName="ovn-config"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960536 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" containerName="ovn-config"
Jan 20 20:05:42 crc kubenswrapper[4948]: E0120 20:05:42.960547 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b8ef8bb-4baf-4b9e-b47f-e9b082d31759" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960555 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b8ef8bb-4baf-4b9e-b47f-e9b082d31759" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960920 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="01681e12-ad87-49f8-8f36-0631b107e19d" containerName="mariadb-account-create-update"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960942 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4facbac8-bbd0-4d0b-83d9-bf2ce7834a40" containerName="ovn-config"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960955 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2522fe2-db81-4fae-abeb-e99db7690237" containerName="mariadb-account-create-update"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960968 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b8ef8bb-4baf-4b9e-b47f-e9b082d31759" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.960985 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="19434efc-51da-454c-a87d-91bd70e97ad1" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.961001 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2cf4ce2-6783-421e-9ca3-2bb938815f2f" containerName="mariadb-database-create"
Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.961009 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="8665723e-3db4-4331-892a-015554f4c300"
containerName="mariadb-account-create-update" Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.962186 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.964170 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 20 20:05:42 crc kubenswrapper[4948]: I0120 20:05:42.980933 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-l799c"] Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.072048 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-svc\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.072508 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.072614 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.072645 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.072786 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7xrl\" (UniqueName: \"kubernetes.io/projected/9d79e045-9533-4d4b-bd78-fa0a5b707a53-kube-api-access-x7xrl\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.072817 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-config\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.083354 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.128158 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.173959 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7xrl\" 
(UniqueName: \"kubernetes.io/projected/9d79e045-9533-4d4b-bd78-fa0a5b707a53-kube-api-access-x7xrl\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.174027 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-config\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.174094 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-svc\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.175151 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-svc\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.175164 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.175196 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-config\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.175203 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.175320 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.175346 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.175967 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-sb\") pod 
\"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.176116 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.206242 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7xrl\" (UniqueName: \"kubernetes.io/projected/9d79e045-9533-4d4b-bd78-fa0a5b707a53-kube-api-access-x7xrl\") pod \"dnsmasq-dns-764c5664d7-l799c\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.278914 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.791855 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-l799c"] Jan 20 20:05:43 crc kubenswrapper[4948]: I0120 20:05:43.918272 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jmpg6"] Jan 20 20:05:44 crc kubenswrapper[4948]: I0120 20:05:44.590978 4948 generic.go:334] "Generic (PLEG): container finished" podID="9d79e045-9533-4d4b-bd78-fa0a5b707a53" containerID="5356317bcc14d3e40adcca640d6e6651c15bbdf7ac8705cb0e9d8e70825a8966" exitCode=0 Jan 20 20:05:44 crc kubenswrapper[4948]: I0120 20:05:44.591121 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-l799c" event={"ID":"9d79e045-9533-4d4b-bd78-fa0a5b707a53","Type":"ContainerDied","Data":"5356317bcc14d3e40adcca640d6e6651c15bbdf7ac8705cb0e9d8e70825a8966"} Jan 20 20:05:44 crc kubenswrapper[4948]: I0120 20:05:44.591364 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-l799c" event={"ID":"9d79e045-9533-4d4b-bd78-fa0a5b707a53","Type":"ContainerStarted","Data":"0b4de25240ed41722e0593651f4997ca61547a3f201fad0950b4919600cde303"} Jan 20 20:05:44 crc kubenswrapper[4948]: I0120 20:05:44.591740 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jmpg6" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="registry-server" containerID="cri-o://63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834" gracePeriod=2 Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.155038 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.330347 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-catalog-content\") pod \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.330638 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tjhl\" (UniqueName: \"kubernetes.io/projected/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-kube-api-access-2tjhl\") pod \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.330680 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-utilities\") pod \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\" (UID: \"eba0bf3a-2428-41df-a1b2-bdfd93056ff4\") " Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.331569 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-utilities" (OuterVolumeSpecName: "utilities") pod "eba0bf3a-2428-41df-a1b2-bdfd93056ff4" (UID: "eba0bf3a-2428-41df-a1b2-bdfd93056ff4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.342579 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-kube-api-access-2tjhl" (OuterVolumeSpecName: "kube-api-access-2tjhl") pod "eba0bf3a-2428-41df-a1b2-bdfd93056ff4" (UID: "eba0bf3a-2428-41df-a1b2-bdfd93056ff4"). InnerVolumeSpecName "kube-api-access-2tjhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.398841 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eba0bf3a-2428-41df-a1b2-bdfd93056ff4" (UID: "eba0bf3a-2428-41df-a1b2-bdfd93056ff4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.433113 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tjhl\" (UniqueName: \"kubernetes.io/projected/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-kube-api-access-2tjhl\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.433163 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.433176 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eba0bf3a-2428-41df-a1b2-bdfd93056ff4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.600876 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-l799c" event={"ID":"9d79e045-9533-4d4b-bd78-fa0a5b707a53","Type":"ContainerStarted","Data":"0b5aaedfab46e66448fad5ad92ee3a5eda8f5f5bd28cf9a0b4321a1439fc928f"} Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.601216 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.603354 4948 generic.go:334] "Generic (PLEG): container finished" podID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerID="63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834" exitCode=0 Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.603391 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jmpg6" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.603422 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmpg6" event={"ID":"eba0bf3a-2428-41df-a1b2-bdfd93056ff4","Type":"ContainerDied","Data":"63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834"} Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.603635 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jmpg6" event={"ID":"eba0bf3a-2428-41df-a1b2-bdfd93056ff4","Type":"ContainerDied","Data":"f4e4fb748be661b34bc14379f6883873caa6471a04171b97c671dead20c72d36"} Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.603692 4948 scope.go:117] "RemoveContainer" containerID="63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.607450 4948 generic.go:334] "Generic (PLEG): container finished" podID="8dd9b1bc-11ee-4556-8c6a-699196c19ec1" containerID="8333bb56024fda1ea6ab2ff9247306ba41ed96b6942899396893d6dba5549a97" exitCode=0 Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.607479 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cc7hs" event={"ID":"8dd9b1bc-11ee-4556-8c6a-699196c19ec1","Type":"ContainerDied","Data":"8333bb56024fda1ea6ab2ff9247306ba41ed96b6942899396893d6dba5549a97"} Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.628659 4948 scope.go:117] "RemoveContainer" containerID="c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.641759 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-764c5664d7-l799c" podStartSLOduration=3.641734414 podStartE2EDuration="3.641734414s" podCreationTimestamp="2026-01-20 20:05:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:45.62852741 +0000 UTC m=+973.579252379" watchObservedRunningTime="2026-01-20 20:05:45.641734414 +0000 UTC m=+973.592459383" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.654798 4948 scope.go:117] "RemoveContainer" containerID="cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.695626 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jmpg6"] Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.697891 4948 scope.go:117] "RemoveContainer" containerID="63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834" Jan 20 20:05:45 crc kubenswrapper[4948]: E0120 20:05:45.699155 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834\": container with ID starting with 63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834 not found: ID does not exist" containerID="63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.699234 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834"} err="failed to get container status \"63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834\": rpc error: code = NotFound desc = could not find container \"63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834\": container with ID starting with 63dbf8d5e37d0a14e3f59a7c6466080e4fd54c57b5dc92f150301eec492fb834 not found: ID does not exist" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.699274 4948 scope.go:117] "RemoveContainer" containerID="c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c" Jan 20 20:05:45 crc kubenswrapper[4948]: E0120 20:05:45.700452 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c\": container with ID starting with c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c not found: ID does not exist" containerID="c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.700597 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c"} err="failed to get container status \"c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c\": rpc error: code = NotFound desc = could not find container \"c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c\": container with ID starting with c49b3deaf54d516a61f9da8b446ea41874a36c1974b26b8b6e49e2987440174c not found: ID does not exist" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.700692 4948 scope.go:117] "RemoveContainer" containerID="cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560" Jan 20 20:05:45 crc kubenswrapper[4948]: E0120 20:05:45.701097 4948 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560\": container with ID starting with cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560 not found: ID does not exist" containerID="cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.701137 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560"} err="failed to get container status \"cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560\": rpc error: code = NotFound desc = could not find container \"cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560\": container with ID starting with cf6389129e4a8b663f532bc5fa9fbaa6756b4ed47d09f2dc231807e513ab1560 not found: ID does not exist" Jan 20 20:05:45 crc kubenswrapper[4948]: I0120 20:05:45.709680 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jmpg6"] Jan 20 20:05:46 crc kubenswrapper[4948]: I0120 20:05:46.581633 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" path="/var/lib/kubelet/pods/eba0bf3a-2428-41df-a1b2-bdfd93056ff4/volumes" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.019562 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-cc7hs" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.161517 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-config-data\") pod \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.162045 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-combined-ca-bundle\") pod \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.162537 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zccb4\" (UniqueName: \"kubernetes.io/projected/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-kube-api-access-zccb4\") pod \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\" (UID: \"8dd9b1bc-11ee-4556-8c6a-699196c19ec1\") " Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.167847 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-kube-api-access-zccb4" (OuterVolumeSpecName: "kube-api-access-zccb4") pod "8dd9b1bc-11ee-4556-8c6a-699196c19ec1" (UID: "8dd9b1bc-11ee-4556-8c6a-699196c19ec1"). InnerVolumeSpecName "kube-api-access-zccb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.190927 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8dd9b1bc-11ee-4556-8c6a-699196c19ec1" (UID: "8dd9b1bc-11ee-4556-8c6a-699196c19ec1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.208602 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-config-data" (OuterVolumeSpecName: "config-data") pod "8dd9b1bc-11ee-4556-8c6a-699196c19ec1" (UID: "8dd9b1bc-11ee-4556-8c6a-699196c19ec1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.264999 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.265291 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zccb4\" (UniqueName: \"kubernetes.io/projected/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-kube-api-access-zccb4\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.265304 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dd9b1bc-11ee-4556-8c6a-699196c19ec1-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.475010 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.536672 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.629544 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-cc7hs" event={"ID":"8dd9b1bc-11ee-4556-8c6a-699196c19ec1","Type":"ContainerDied","Data":"f836bda370cc551faa1f5e836cf8c005c60af1a012cc7155cd97ba9d99ecf70b"} Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.629619 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f836bda370cc551faa1f5e836cf8c005c60af1a012cc7155cd97ba9d99ecf70b" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.629559 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-cc7hs" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.987934 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-m5tvw"] Jan 20 20:05:47 crc kubenswrapper[4948]: E0120 20:05:47.988324 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dd9b1bc-11ee-4556-8c6a-699196c19ec1" containerName="keystone-db-sync" Jan 20 20:05:47 crc kubenswrapper[4948]: I0120 20:05:47.988345 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dd9b1bc-11ee-4556-8c6a-699196c19ec1" containerName="keystone-db-sync" Jan 20 20:05:48 crc kubenswrapper[4948]: E0120 20:05:48.001486 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="registry-server" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.001528 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="registry-server" Jan 20 20:05:48 crc kubenswrapper[4948]: E0120 20:05:48.001566 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="extract-content" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.001572 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="extract-content" Jan 20 20:05:48 crc kubenswrapper[4948]: E0120 20:05:48.001586 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="extract-utilities" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.001595 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="extract-utilities" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.001971 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dd9b1bc-11ee-4556-8c6a-699196c19ec1" containerName="keystone-db-sync" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.001983 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="eba0bf3a-2428-41df-a1b2-bdfd93056ff4" containerName="registry-server" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.002684 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.011190 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.011443 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.011659 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.011835 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.011928 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9zfkq" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.043331 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-m5tvw"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.076249 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-l799c"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.076585 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764c5664d7-l799c" podUID="9d79e045-9533-4d4b-bd78-fa0a5b707a53" containerName="dnsmasq-dns" containerID="cri-o://0b5aaedfab46e66448fad5ad92ee3a5eda8f5f5bd28cf9a0b4321a1439fc928f" gracePeriod=10 Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.087813 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-credential-keys\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.087872 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-fernet-keys\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.087908 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w8z8\" (UniqueName: \"kubernetes.io/projected/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-kube-api-access-2w8z8\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.089141 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-config-data\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.089363 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-scripts\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 
crc kubenswrapper[4948]: I0120 20:05:48.089386 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-combined-ca-bundle\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.154202 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-lkk6z"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.155491 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.194953 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-lkk6z"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.196789 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-combined-ca-bundle\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.196870 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-svc\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.196947 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.196984 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.197043 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-credential-keys\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.197071 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-fernet-keys\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.197115 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w8z8\" (UniqueName: \"kubernetes.io/projected/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-kube-api-access-2w8z8\") pod 
\"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.197263 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrc7k\" (UniqueName: \"kubernetes.io/projected/f2718563-3639-4c91-abc9-0a7132d7cf7b-kube-api-access-qrc7k\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.197357 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.197381 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-config-data\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.197449 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-config\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.197610 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-scripts\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.210783 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-credential-keys\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.211071 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-combined-ca-bundle\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.211334 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-config-data\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.212299 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-scripts\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 
20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.216035 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-fernet-keys\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.251212 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2w8z8\" (UniqueName: \"kubernetes.io/projected/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-kube-api-access-2w8z8\") pod \"keystone-bootstrap-m5tvw\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.305760 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.305804 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-config\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.305859 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-svc\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.305885 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.305907 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.305960 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrc7k\" (UniqueName: \"kubernetes.io/projected/f2718563-3639-4c91-abc9-0a7132d7cf7b-kube-api-access-qrc7k\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.309428 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.309444 4948 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.309635 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-config\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.309976 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-svc\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.310540 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.340141 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.424985 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8njnt"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.462641 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrc7k\" (UniqueName: \"kubernetes.io/projected/f2718563-3639-4c91-abc9-0a7132d7cf7b-kube-api-access-qrc7k\") pod \"dnsmasq-dns-5959f8865f-lkk6z\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.499330 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.504765 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-dchk5"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.506087 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.517115 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-2fhzd" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.527023 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.544123 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.544361 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.544586 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.567414 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.574336 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.618780 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-run-httpd\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.618918 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-scripts\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.619539 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4qf6\" (UniqueName: \"kubernetes.io/projected/6cf14434-5ac6-4983-8abe-7305b182c92d-kube-api-access-q4qf6\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.619597 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-config-data\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.619658 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/974e456e-61d1-4c5e-a8c9-9ebbb5246848-etc-machine-id\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.619910 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-config-data\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.620012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.620062 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-scripts\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.620130 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-log-httpd\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.620169 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gk68v\" (UniqueName: \"kubernetes.io/projected/974e456e-61d1-4c5e-a8c9-9ebbb5246848-kube-api-access-gk68v\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.620242 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-db-sync-config-data\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.620315 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-combined-ca-bundle\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.620343 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.657440 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-789494c67c-djqgh"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.684396 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.697373 4948 generic.go:334] "Generic (PLEG): container finished" podID="9d79e045-9533-4d4b-bd78-fa0a5b707a53" containerID="0b5aaedfab46e66448fad5ad92ee3a5eda8f5f5bd28cf9a0b4321a1439fc928f" exitCode=0 Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.697860 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8njnt" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="registry-server" containerID="cri-o://23a254c510ad9724fbb174be37d080726f046614b0d6bab27ad7f7c41d29606f" gracePeriod=2 Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.698215 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-l799c" event={"ID":"9d79e045-9533-4d4b-bd78-fa0a5b707a53","Type":"ContainerDied","Data":"0b5aaedfab46e66448fad5ad92ee3a5eda8f5f5bd28cf9a0b4321a1439fc928f"} Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.726315 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.726901 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.727090 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.727254 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-q7qpv" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737175 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/152975f8-dda3-4343-8122-9d3506495970-logs\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737248 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-config-data\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737312 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737343 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-scripts\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737375 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-scripts\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 
20:05:48.737413 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-log-httpd\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737440 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gk68v\" (UniqueName: \"kubernetes.io/projected/974e456e-61d1-4c5e-a8c9-9ebbb5246848-kube-api-access-gk68v\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737499 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-db-sync-config-data\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737526 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkqw9\" (UniqueName: \"kubernetes.io/projected/152975f8-dda3-4343-8122-9d3506495970-kube-api-access-tkqw9\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737555 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-combined-ca-bundle\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737572 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737591 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-run-httpd\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737637 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-scripts\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737676 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/152975f8-dda3-4343-8122-9d3506495970-horizon-secret-key\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.737764 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4qf6\" (UniqueName: 
\"kubernetes.io/projected/6cf14434-5ac6-4983-8abe-7305b182c92d-kube-api-access-q4qf6\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.739829 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-dchk5"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.740347 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-config-data\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.740402 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-config-data\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.740424 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/974e456e-61d1-4c5e-a8c9-9ebbb5246848-etc-machine-id\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.740524 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/974e456e-61d1-4c5e-a8c9-9ebbb5246848-etc-machine-id\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.754153 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-log-httpd\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.757767 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-run-httpd\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.760224 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-config-data\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.762074 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-combined-ca-bundle\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.762722 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " 
pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.765951 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-scripts\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.766385 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-db-sync-config-data\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.767048 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-scripts\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.789053 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-config-data\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.790022 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.823024 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.848309 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-scripts\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.848397 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkqw9\" (UniqueName: \"kubernetes.io/projected/152975f8-dda3-4343-8122-9d3506495970-kube-api-access-tkqw9\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.848446 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/152975f8-dda3-4343-8122-9d3506495970-horizon-secret-key\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.848495 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-config-data\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.848527 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/152975f8-dda3-4343-8122-9d3506495970-logs\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.848896 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/152975f8-dda3-4343-8122-9d3506495970-logs\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.849444 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-scripts\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.851549 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4qf6\" (UniqueName: \"kubernetes.io/projected/6cf14434-5ac6-4983-8abe-7305b182c92d-kube-api-access-q4qf6\") pod \"ceilometer-0\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " pod="openstack/ceilometer-0" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.852121 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-config-data\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.854728 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gk68v\" (UniqueName: \"kubernetes.io/projected/974e456e-61d1-4c5e-a8c9-9ebbb5246848-kube-api-access-gk68v\") pod \"cinder-db-sync-dchk5\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") " pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.862193 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/152975f8-dda3-4343-8122-9d3506495970-horizon-secret-key\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.873272 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dchk5" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.894835 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkqw9\" (UniqueName: \"kubernetes.io/projected/152975f8-dda3-4343-8122-9d3506495970-kube-api-access-tkqw9\") pod \"horizon-789494c67c-djqgh\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") " pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.947836 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-789494c67c-djqgh"] Jan 20 20:05:48 crc kubenswrapper[4948]: I0120 20:05:48.956212 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.005801 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-qxsld"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.007098 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.011365 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.011630 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-mrjrl" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.027806 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-qxsld"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.056307 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-lkk6z"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.056438 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-99f6n"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.058459 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.060956 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-combined-ca-bundle\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.061166 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-db-sync-config-data\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.061256 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-scripts\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.061393 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-combined-ca-bundle\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.061465 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn6js\" (UniqueName: \"kubernetes.io/projected/4a24a241-d8d2-484c-ae7b-436777e1fddd-kube-api-access-wn6js\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.061560 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-config-data\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.064910 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqdrh\" (UniqueName: \"kubernetes.io/projected/0fa00dfc-b064-4964-a65d-80809492c96d-kube-api-access-gqdrh\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.065150 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fa00dfc-b064-4964-a65d-80809492c96d-logs\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.064481 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-789494c67c-djqgh" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.064935 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.070374 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-nvrsd" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.070603 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.091648 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-99f6n"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.166555 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-combined-ca-bundle\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.166617 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn6js\" (UniqueName: \"kubernetes.io/projected/4a24a241-d8d2-484c-ae7b-436777e1fddd-kube-api-access-wn6js\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.166639 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-config-data\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.166689 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqdrh\" (UniqueName: \"kubernetes.io/projected/0fa00dfc-b064-4964-a65d-80809492c96d-kube-api-access-gqdrh\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.166749 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/0fa00dfc-b064-4964-a65d-80809492c96d-logs\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.166785 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-combined-ca-bundle\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.166810 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-db-sync-config-data\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.166830 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-scripts\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.187393 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-combined-ca-bundle\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.187698 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fa00dfc-b064-4964-a65d-80809492c96d-logs\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.212514 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-5dp57"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.216339 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-scripts\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.217197 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.219616 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-combined-ca-bundle\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.219817 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-db-sync-config-data\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.232090 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.232303 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.232480 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-r9l27" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.232576 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqdrh\" (UniqueName: \"kubernetes.io/projected/0fa00dfc-b064-4964-a65d-80809492c96d-kube-api-access-gqdrh\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.260823 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-config-data\") pod \"placement-db-sync-99f6n\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.262072 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-5dp57"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.312734 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-57b75d5c69-bjxh7"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.314454 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.315630 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn6js\" (UniqueName: \"kubernetes.io/projected/4a24a241-d8d2-484c-ae7b-436777e1fddd-kube-api-access-wn6js\") pod \"barbican-db-sync-qxsld\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.371097 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-qxsld" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.374572 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-combined-ca-bundle\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.374631 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rhm8\" (UniqueName: \"kubernetes.io/projected/c4d16876-ed2f-4186-801c-48d52e01ac8c-kube-api-access-4rhm8\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.374742 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-config\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.411588 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-5rhgw"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.413124 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.422347 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-99f6n" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.456525 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-5rhgw"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.487571 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c22039a6-695a-4abb-adcc-631c6703e03b-horizon-secret-key\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.487646 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-combined-ca-bundle\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.487695 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rhm8\" (UniqueName: \"kubernetes.io/projected/c4d16876-ed2f-4186-801c-48d52e01ac8c-kube-api-access-4rhm8\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.530252 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-config-data\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 
20:05:49.528888 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-combined-ca-bundle\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.489190 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-57b75d5c69-bjxh7"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.531538 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c22039a6-695a-4abb-adcc-631c6703e03b-logs\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.531722 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-scripts\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.531872 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzcv9\" (UniqueName: \"kubernetes.io/projected/c22039a6-695a-4abb-adcc-631c6703e03b-kube-api-access-hzcv9\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.531985 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-config\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.567487 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-config\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.571007 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rhm8\" (UniqueName: \"kubernetes.io/projected/c4d16876-ed2f-4186-801c-48d52e01ac8c-kube-api-access-4rhm8\") pod \"neutron-db-sync-5dp57\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") " pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638279 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638422 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzmmv\" (UniqueName: \"kubernetes.io/projected/2c19042c-af73-4228-a686-15cb4f7365cf-kube-api-access-tzmmv\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " 
pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638472 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c22039a6-695a-4abb-adcc-631c6703e03b-horizon-secret-key\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638501 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638540 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-config\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638574 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638604 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638733 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-config-data\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638758 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c22039a6-695a-4abb-adcc-631c6703e03b-logs\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638905 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-scripts\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.638941 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzcv9\" (UniqueName: \"kubernetes.io/projected/c22039a6-695a-4abb-adcc-631c6703e03b-kube-api-access-hzcv9\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" 
Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.647089 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-config-data\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.653586 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c22039a6-695a-4abb-adcc-631c6703e03b-logs\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.666228 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c22039a6-695a-4abb-adcc-631c6703e03b-horizon-secret-key\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.672615 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-scripts\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.675416 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzcv9\" (UniqueName: \"kubernetes.io/projected/c22039a6-695a-4abb-adcc-631c6703e03b-kube-api-access-hzcv9\") pod \"horizon-57b75d5c69-bjxh7\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") " pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.732322 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-l799c" event={"ID":"9d79e045-9533-4d4b-bd78-fa0a5b707a53","Type":"ContainerDied","Data":"0b4de25240ed41722e0593651f4997ca61547a3f201fad0950b4919600cde303"} Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.732366 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b4de25240ed41722e0593651f4997ca61547a3f201fad0950b4919600cde303" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.740560 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.741373 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzmmv\" (UniqueName: \"kubernetes.io/projected/2c19042c-af73-4228-a686-15cb4f7365cf-kube-api-access-tzmmv\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.741480 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 
20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.741561 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-config\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.741720 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.742018 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.743042 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.746886 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.747266 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.747741 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-config\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.748260 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.766139 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.772575 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzmmv\" (UniqueName: \"kubernetes.io/projected/2c19042c-af73-4228-a686-15cb4f7365cf-kube-api-access-tzmmv\") pod \"dnsmasq-dns-58dd9ff6bc-5rhgw\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.766398 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8njnt" event={"ID":"24ac2816-d915-48c3-b75a-3f866aa46a43","Type":"ContainerDied","Data":"23a254c510ad9724fbb174be37d080726f046614b0d6bab27ad7f7c41d29606f"} Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.774337 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5dp57" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.766375 4948 generic.go:334] "Generic (PLEG): container finished" podID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerID="23a254c510ad9724fbb174be37d080726f046614b0d6bab27ad7f7c41d29606f" exitCode=0 Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.842728 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.844617 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.848863 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-m5tvw"] Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.955480 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-sb\") pod \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.956285 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-nb\") pod \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.956312 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-swift-storage-0\") pod \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.956525 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7xrl\" (UniqueName: \"kubernetes.io/projected/9d79e045-9533-4d4b-bd78-fa0a5b707a53-kube-api-access-x7xrl\") pod \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.956558 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-config\") pod \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " Jan 20 20:05:49 crc 
kubenswrapper[4948]: I0120 20:05:49.956578 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-svc\") pod \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\" (UID: \"9d79e045-9533-4d4b-bd78-fa0a5b707a53\") " Jan 20 20:05:49 crc kubenswrapper[4948]: I0120 20:05:49.986174 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d79e045-9533-4d4b-bd78-fa0a5b707a53-kube-api-access-x7xrl" (OuterVolumeSpecName: "kube-api-access-x7xrl") pod "9d79e045-9533-4d4b-bd78-fa0a5b707a53" (UID: "9d79e045-9533-4d4b-bd78-fa0a5b707a53"). InnerVolumeSpecName "kube-api-access-x7xrl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.045961 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9d79e045-9533-4d4b-bd78-fa0a5b707a53" (UID: "9d79e045-9533-4d4b-bd78-fa0a5b707a53"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.061333 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.061379 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7xrl\" (UniqueName: \"kubernetes.io/projected/9d79e045-9533-4d4b-bd78-fa0a5b707a53-kube-api-access-x7xrl\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.064498 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9d79e045-9533-4d4b-bd78-fa0a5b707a53" (UID: "9d79e045-9533-4d4b-bd78-fa0a5b707a53"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.065335 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-config" (OuterVolumeSpecName: "config") pod "9d79e045-9533-4d4b-bd78-fa0a5b707a53" (UID: "9d79e045-9533-4d4b-bd78-fa0a5b707a53"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.105325 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9d79e045-9533-4d4b-bd78-fa0a5b707a53" (UID: "9d79e045-9533-4d4b-bd78-fa0a5b707a53"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.118886 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9d79e045-9533-4d4b-bd78-fa0a5b707a53" (UID: "9d79e045-9533-4d4b-bd78-fa0a5b707a53"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.163081 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.163123 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.163134 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.163145 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9d79e045-9533-4d4b-bd78-fa0a5b707a53-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.694843 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.788038 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjr8c\" (UniqueName: \"kubernetes.io/projected/24ac2816-d915-48c3-b75a-3f866aa46a43-kube-api-access-rjr8c\") pod \"24ac2816-d915-48c3-b75a-3f866aa46a43\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.788154 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-catalog-content\") pod \"24ac2816-d915-48c3-b75a-3f866aa46a43\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.788219 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-utilities\") pod \"24ac2816-d915-48c3-b75a-3f866aa46a43\" (UID: \"24ac2816-d915-48c3-b75a-3f866aa46a43\") " Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.797315 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-utilities" (OuterVolumeSpecName: "utilities") pod "24ac2816-d915-48c3-b75a-3f866aa46a43" (UID: "24ac2816-d915-48c3-b75a-3f866aa46a43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.820798 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24ac2816-d915-48c3-b75a-3f866aa46a43-kube-api-access-rjr8c" (OuterVolumeSpecName: "kube-api-access-rjr8c") pod "24ac2816-d915-48c3-b75a-3f866aa46a43" (UID: "24ac2816-d915-48c3-b75a-3f866aa46a43"). InnerVolumeSpecName "kube-api-access-rjr8c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.838850 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-lkk6z"] Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.838909 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-789494c67c-djqgh"] Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.841581 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8njnt" event={"ID":"24ac2816-d915-48c3-b75a-3f866aa46a43","Type":"ContainerDied","Data":"ea51b5ad137b44712b408cbd575f06bd9ba0230dceee486be5e47a4f5f471633"} Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.841628 4948 scope.go:117] "RemoveContainer" containerID="23a254c510ad9724fbb174be37d080726f046614b0d6bab27ad7f7c41d29606f" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.841766 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8njnt" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.844251 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-l799c" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.844410 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m5tvw" event={"ID":"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7","Type":"ContainerStarted","Data":"6c0bd14abac4fb828bb9d5935b5f754c29928576bf685317f4221903188bef4d"} Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.895857 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjr8c\" (UniqueName: \"kubernetes.io/projected/24ac2816-d915-48c3-b75a-3f866aa46a43-kube-api-access-rjr8c\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.896684 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.936969 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-dchk5"] Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.940529 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "24ac2816-d915-48c3-b75a-3f866aa46a43" (UID: "24ac2816-d915-48c3-b75a-3f866aa46a43"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.952410 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-l799c"] Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.956591 4948 scope.go:117] "RemoveContainer" containerID="8d6c7feb57504becceb7771eaf561c74bbe33a92945791a56c201dc290915db7" Jan 20 20:05:50 crc kubenswrapper[4948]: I0120 20:05:50.964113 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-l799c"] Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.001278 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/24ac2816-d915-48c3-b75a-3f866aa46a43-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.024224 4948 scope.go:117] "RemoveContainer" containerID="7c8e3bbb2b8de0291a990aebc3feba86bc46aad3f89c3dda453e7518c5b18980" Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.237039 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8njnt"] Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.273428 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8njnt"] Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.318526 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-99f6n"] Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.368926 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:05:51 crc kubenswrapper[4948]: W0120 20:05:51.392347 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cf14434_5ac6_4983_8abe_7305b182c92d.slice/crio-a44d30b75b642fc8df3424a754bafd81309f5f693cb36cc33a8d40e6be64690a WatchSource:0}: Error finding container a44d30b75b642fc8df3424a754bafd81309f5f693cb36cc33a8d40e6be64690a: Status 404 returned error can't find the container with id a44d30b75b642fc8df3424a754bafd81309f5f693cb36cc33a8d40e6be64690a Jan 20 20:05:51 crc kubenswrapper[4948]: W0120 20:05:51.489922 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a24a241_d8d2_484c_ae7b_436777e1fddd.slice/crio-80d6986ba2e1b9f9ea4a6f053d43c6bb0c9f7d90bf6f5fee7792198e05231092 WatchSource:0}: Error finding container 80d6986ba2e1b9f9ea4a6f053d43c6bb0c9f7d90bf6f5fee7792198e05231092: Status 404 returned error can't find the container with id 80d6986ba2e1b9f9ea4a6f053d43c6bb0c9f7d90bf6f5fee7792198e05231092 Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.490034 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-qxsld"] Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.499985 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-5dp57"] Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.553745 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-5rhgw"] Jan 20 20:05:51 crc kubenswrapper[4948]: W0120 20:05:51.566967 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c19042c_af73_4228_a686_15cb4f7365cf.slice/crio-9b6362b96f7426c0085c1916bf04e1f096a2afaf184ba4da1130b4d42379ad86 WatchSource:0}: 
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.803007 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-57b75d5c69-bjxh7"]
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.869235 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m5tvw" event={"ID":"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7","Type":"ContainerStarted","Data":"198ead04e01000671cd4aa517213a35c4ae105bdad71c32c3dc17624585693bc"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.874273 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-789494c67c-djqgh" event={"ID":"152975f8-dda3-4343-8122-9d3506495970","Type":"ContainerStarted","Data":"14c56e68292228a33b8da3599738ce0b2ca540bf96b356d315805d077889916e"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.876494 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6cf14434-5ac6-4983-8abe-7305b182c92d","Type":"ContainerStarted","Data":"a44d30b75b642fc8df3424a754bafd81309f5f693cb36cc33a8d40e6be64690a"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.877987 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dchk5" event={"ID":"974e456e-61d1-4c5e-a8c9-9ebbb5246848","Type":"ContainerStarted","Data":"566e0d816ec12a3294bf5b34b925771c1b35726bf257c61e64de24434be4f13a"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.879270 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-qxsld" event={"ID":"4a24a241-d8d2-484c-ae7b-436777e1fddd","Type":"ContainerStarted","Data":"80d6986ba2e1b9f9ea4a6f053d43c6bb0c9f7d90bf6f5fee7792198e05231092"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.880301 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5dp57" event={"ID":"c4d16876-ed2f-4186-801c-48d52e01ac8c","Type":"ContainerStarted","Data":"383f92f19d7afddd162a3e8475b64cbd386d1b4a1adf021f608896faa7f45529"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.881830 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-99f6n" event={"ID":"0fa00dfc-b064-4964-a65d-80809492c96d","Type":"ContainerStarted","Data":"7df162546ce92f3033cd568fa11bf79468713e7d542cb0f2f2a72b825b7812b7"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.883254 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57b75d5c69-bjxh7" event={"ID":"c22039a6-695a-4abb-adcc-631c6703e03b","Type":"ContainerStarted","Data":"56ee7b8bf7c51d80a97d1a39d9a94847ca8f1a460217b0f3fc9f6a5928150ae3"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.884999 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" event={"ID":"2c19042c-af73-4228-a686-15cb4f7365cf","Type":"ContainerStarted","Data":"9b6362b96f7426c0085c1916bf04e1f096a2afaf184ba4da1130b4d42379ad86"}
Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.890945 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-m5tvw" podStartSLOduration=4.8909203869999995 podStartE2EDuration="4.890920387s" podCreationTimestamp="2026-01-20 20:05:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:51.889461376 +0000 UTC m=+979.840186345" watchObservedRunningTime="2026-01-20 20:05:51.890920387 +0000 UTC m=+979.841645366"
+0000 UTC" observedRunningTime="2026-01-20 20:05:51.889461376 +0000 UTC m=+979.840186345" watchObservedRunningTime="2026-01-20 20:05:51.890920387 +0000 UTC m=+979.841645366" Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.922626 4948 generic.go:334] "Generic (PLEG): container finished" podID="f2718563-3639-4c91-abc9-0a7132d7cf7b" containerID="44e7b31cbe298adf0490fb5fbafdfd2682b5dbd107501170b3cb10959a6a3376" exitCode=0 Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.922681 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" event={"ID":"f2718563-3639-4c91-abc9-0a7132d7cf7b","Type":"ContainerDied","Data":"44e7b31cbe298adf0490fb5fbafdfd2682b5dbd107501170b3cb10959a6a3376"} Jan 20 20:05:51 crc kubenswrapper[4948]: I0120 20:05:51.922729 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" event={"ID":"f2718563-3639-4c91-abc9-0a7132d7cf7b","Type":"ContainerStarted","Data":"420a18ba0f61de5050412ed50ecf9cdb9cb400ee34586859d17e26b4977fcdf6"} Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.132882 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-789494c67c-djqgh"] Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.194651 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68c9db4489-g8s2q"] Jan 20 20:05:52 crc kubenswrapper[4948]: E0120 20:05:52.195157 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="registry-server" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.195178 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="registry-server" Jan 20 20:05:52 crc kubenswrapper[4948]: E0120 20:05:52.195193 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="extract-content" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.195201 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="extract-content" Jan 20 20:05:52 crc kubenswrapper[4948]: E0120 20:05:52.195212 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d79e045-9533-4d4b-bd78-fa0a5b707a53" containerName="init" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.195219 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d79e045-9533-4d4b-bd78-fa0a5b707a53" containerName="init" Jan 20 20:05:52 crc kubenswrapper[4948]: E0120 20:05:52.195234 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d79e045-9533-4d4b-bd78-fa0a5b707a53" containerName="dnsmasq-dns" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.195242 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d79e045-9533-4d4b-bd78-fa0a5b707a53" containerName="dnsmasq-dns" Jan 20 20:05:52 crc kubenswrapper[4948]: E0120 20:05:52.195259 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="extract-utilities" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.195266 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="extract-utilities" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.195477 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" containerName="registry-server" Jan 20 20:05:52 
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.196618 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.260330 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68c9db4489-g8s2q"]
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.332626 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.345935 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-scripts\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.346001 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2zfm\" (UniqueName: \"kubernetes.io/projected/da0e1e1a-77ab-4d97-8d9f-fd081e462573-kube-api-access-k2zfm\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.346062 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0e1e1a-77ab-4d97-8d9f-fd081e462573-logs\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.346085 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-config-data\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.346128 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da0e1e1a-77ab-4d97-8d9f-fd081e462573-horizon-secret-key\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.447513 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-scripts\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.447589 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2zfm\" (UniqueName: \"kubernetes.io/projected/da0e1e1a-77ab-4d97-8d9f-fd081e462573-kube-api-access-k2zfm\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.447744 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0e1e1a-77ab-4d97-8d9f-fd081e462573-logs\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q"
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0e1e1a-77ab-4d97-8d9f-fd081e462573-logs\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.447819 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-config-data\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.447947 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da0e1e1a-77ab-4d97-8d9f-fd081e462573-horizon-secret-key\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.448281 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0e1e1a-77ab-4d97-8d9f-fd081e462573-logs\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.448796 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-scripts\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.449090 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-config-data\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.460515 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da0e1e1a-77ab-4d97-8d9f-fd081e462573-horizon-secret-key\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.468462 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2zfm\" (UniqueName: \"kubernetes.io/projected/da0e1e1a-77ab-4d97-8d9f-fd081e462573-kube-api-access-k2zfm\") pod \"horizon-68c9db4489-g8s2q\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") " pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.565613 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.644734 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24ac2816-d915-48c3-b75a-3f866aa46a43" path="/var/lib/kubelet/pods/24ac2816-d915-48c3-b75a-3f866aa46a43/volumes" Jan 20 20:05:52 crc kubenswrapper[4948]: I0120 20:05:52.652271 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d79e045-9533-4d4b-bd78-fa0a5b707a53" path="/var/lib/kubelet/pods/9d79e045-9533-4d4b-bd78-fa0a5b707a53/volumes" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:52.952349 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:52.997874 4948 generic.go:334] "Generic (PLEG): container finished" podID="2c19042c-af73-4228-a686-15cb4f7365cf" containerID="55f65a7dd9dac3467057d0e1c626cd0593cbf1797d4f0fc4a00f34c0668130c7" exitCode=0 Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:52.997968 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" event={"ID":"2c19042c-af73-4228-a686-15cb4f7365cf","Type":"ContainerDied","Data":"55f65a7dd9dac3467057d0e1c626cd0593cbf1797d4f0fc4a00f34c0668130c7"} Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.029979 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5dp57" event={"ID":"c4d16876-ed2f-4186-801c-48d52e01ac8c","Type":"ContainerStarted","Data":"21db9b1a1206ebafe6b573d97de0bc3713a5845e199b0d2d20cdcbbab3f1796d"} Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.060876 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.060915 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-lkk6z" event={"ID":"f2718563-3639-4c91-abc9-0a7132d7cf7b","Type":"ContainerDied","Data":"420a18ba0f61de5050412ed50ecf9cdb9cb400ee34586859d17e26b4977fcdf6"} Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.060964 4948 scope.go:117] "RemoveContainer" containerID="44e7b31cbe298adf0490fb5fbafdfd2682b5dbd107501170b3cb10959a6a3376" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.061338 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-svc\") pod \"f2718563-3639-4c91-abc9-0a7132d7cf7b\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.061404 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-nb\") pod \"f2718563-3639-4c91-abc9-0a7132d7cf7b\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.061435 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-config\") pod \"f2718563-3639-4c91-abc9-0a7132d7cf7b\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.061503 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-sb\") pod \"f2718563-3639-4c91-abc9-0a7132d7cf7b\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.061603 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-swift-storage-0\") pod \"f2718563-3639-4c91-abc9-0a7132d7cf7b\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.061675 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrc7k\" (UniqueName: \"kubernetes.io/projected/f2718563-3639-4c91-abc9-0a7132d7cf7b-kube-api-access-qrc7k\") pod \"f2718563-3639-4c91-abc9-0a7132d7cf7b\" (UID: \"f2718563-3639-4c91-abc9-0a7132d7cf7b\") " Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.074687 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2718563-3639-4c91-abc9-0a7132d7cf7b-kube-api-access-qrc7k" (OuterVolumeSpecName: "kube-api-access-qrc7k") pod "f2718563-3639-4c91-abc9-0a7132d7cf7b" (UID: "f2718563-3639-4c91-abc9-0a7132d7cf7b"). InnerVolumeSpecName "kube-api-access-qrc7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.104116 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f2718563-3639-4c91-abc9-0a7132d7cf7b" (UID: "f2718563-3639-4c91-abc9-0a7132d7cf7b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.143739 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f2718563-3639-4c91-abc9-0a7132d7cf7b" (UID: "f2718563-3639-4c91-abc9-0a7132d7cf7b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.152199 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f2718563-3639-4c91-abc9-0a7132d7cf7b" (UID: "f2718563-3639-4c91-abc9-0a7132d7cf7b"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.154102 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-5dp57" podStartSLOduration=4.154084296 podStartE2EDuration="4.154084296s" podCreationTimestamp="2026-01-20 20:05:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:53.118324655 +0000 UTC m=+981.069049614" watchObservedRunningTime="2026-01-20 20:05:53.154084296 +0000 UTC m=+981.104809265" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.166344 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.166369 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.166377 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.166388 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrc7k\" (UniqueName: \"kubernetes.io/projected/f2718563-3639-4c91-abc9-0a7132d7cf7b-kube-api-access-qrc7k\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.169117 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f2718563-3639-4c91-abc9-0a7132d7cf7b" (UID: "f2718563-3639-4c91-abc9-0a7132d7cf7b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.173732 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-config" (OuterVolumeSpecName: "config") pod "f2718563-3639-4c91-abc9-0a7132d7cf7b" (UID: "f2718563-3639-4c91-abc9-0a7132d7cf7b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.327540 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.327858 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f2718563-3639-4c91-abc9-0a7132d7cf7b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.493198 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-lkk6z"] Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:53.517096 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-lkk6z"] Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:54.167994 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" event={"ID":"2c19042c-af73-4228-a686-15cb4f7365cf","Type":"ContainerStarted","Data":"ccc10d498e141427d768779e9420b8e9c911a45978e27249a8c3f3c1284e675b"} Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:54.168524 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:54.421125 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" podStartSLOduration=5.421104996 podStartE2EDuration="5.421104996s" podCreationTimestamp="2026-01-20 20:05:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:05:54.193288703 +0000 UTC m=+982.144013672" watchObservedRunningTime="2026-01-20 20:05:54.421104996 +0000 UTC m=+982.371829965" Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:54.431002 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68c9db4489-g8s2q"] Jan 20 20:05:54 crc kubenswrapper[4948]: W0120 20:05:54.436861 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda0e1e1a_77ab_4d97_8d9f_fd081e462573.slice/crio-36a4993a93dd195779b7b00cfd0ee148a334671f26f63c774f9f9fac8d5131a4 WatchSource:0}: Error finding container 36a4993a93dd195779b7b00cfd0ee148a334671f26f63c774f9f9fac8d5131a4: Status 404 returned error can't find the container with id 36a4993a93dd195779b7b00cfd0ee148a334671f26f63c774f9f9fac8d5131a4 Jan 20 20:05:54 crc kubenswrapper[4948]: I0120 20:05:54.614933 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2718563-3639-4c91-abc9-0a7132d7cf7b" path="/var/lib/kubelet/pods/f2718563-3639-4c91-abc9-0a7132d7cf7b/volumes" Jan 20 20:05:55 crc kubenswrapper[4948]: I0120 20:05:55.212597 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c9db4489-g8s2q" event={"ID":"da0e1e1a-77ab-4d97-8d9f-fd081e462573","Type":"ContainerStarted","Data":"36a4993a93dd195779b7b00cfd0ee148a334671f26f63c774f9f9fac8d5131a4"} Jan 20 20:05:57 crc kubenswrapper[4948]: I0120 20:05:57.241020 4948 generic.go:334] "Generic (PLEG): container finished" podID="d96cb8cd-dfa3-4d70-af44-be9627945b5f" containerID="5f03c6d62c705dccc787efee2f93f6e8d2b2f77510a812f0bc73e9f963f47546" exitCode=0 Jan 20 20:05:57 crc kubenswrapper[4948]: I0120 20:05:57.241136 4948 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-fdwn2" event={"ID":"d96cb8cd-dfa3-4d70-af44-be9627945b5f","Type":"ContainerDied","Data":"5f03c6d62c705dccc787efee2f93f6e8d2b2f77510a812f0bc73e9f963f47546"} Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.257220 4948 generic.go:334] "Generic (PLEG): container finished" podID="12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" containerID="198ead04e01000671cd4aa517213a35c4ae105bdad71c32c3dc17624585693bc" exitCode=0 Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.257296 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m5tvw" event={"ID":"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7","Type":"ContainerDied","Data":"198ead04e01000671cd4aa517213a35c4ae105bdad71c32c3dc17624585693bc"} Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.848380 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-57b75d5c69-bjxh7"] Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.893169 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-68bc7c4fc6-4mkmv"] Jan 20 20:05:58 crc kubenswrapper[4948]: E0120 20:05:58.893926 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2718563-3639-4c91-abc9-0a7132d7cf7b" containerName="init" Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.893972 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2718563-3639-4c91-abc9-0a7132d7cf7b" containerName="init" Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.894311 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2718563-3639-4c91-abc9-0a7132d7cf7b" containerName="init" Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.895849 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.903092 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Jan 20 20:05:58 crc kubenswrapper[4948]: I0120 20:05:58.906214 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68bc7c4fc6-4mkmv"] Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.006737 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68c9db4489-g8s2q"] Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.048136 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-67dd67cb9b-9w4wk"] Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.050578 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.084347 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67dd67cb9b-9w4wk"] Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.111990 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d2c0905-915e-4504-8454-ee3500220ab3-scripts\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112058 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-horizon-secret-key\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112115 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-combined-ca-bundle\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112149 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-combined-ca-bundle\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112240 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-config-data\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112271 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmw2q\" (UniqueName: \"kubernetes.io/projected/4d2c0905-915e-4504-8454-ee3500220ab3-kube-api-access-jmw2q\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112340 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-secret-key\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112389 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d2c0905-915e-4504-8454-ee3500220ab3-logs\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112423 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-scripts\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112443 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4d2c0905-915e-4504-8454-ee3500220ab3-config-data\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112478 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjmfr\" (UniqueName: \"kubernetes.io/projected/af522f17-3cad-4004-b112-51e47fa9fea7-kube-api-access-wjmfr\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112529 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-horizon-tls-certs\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112558 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af522f17-3cad-4004-b112-51e47fa9fea7-logs\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.112593 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-tls-certs\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.213812 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-tls-certs\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.213863 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d2c0905-915e-4504-8454-ee3500220ab3-scripts\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.213884 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-horizon-secret-key\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.214819 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4d2c0905-915e-4504-8454-ee3500220ab3-scripts\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.214884 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-combined-ca-bundle\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215592 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-combined-ca-bundle\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215654 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-config-data\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215677 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmw2q\" (UniqueName: \"kubernetes.io/projected/4d2c0905-915e-4504-8454-ee3500220ab3-kube-api-access-jmw2q\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215746 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-secret-key\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215800 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d2c0905-915e-4504-8454-ee3500220ab3-logs\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215829 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-scripts\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215846 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4d2c0905-915e-4504-8454-ee3500220ab3-config-data\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215918 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjmfr\" (UniqueName: \"kubernetes.io/projected/af522f17-3cad-4004-b112-51e47fa9fea7-kube-api-access-wjmfr\") pod 
\"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.215964 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-horizon-tls-certs\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.216005 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af522f17-3cad-4004-b112-51e47fa9fea7-logs\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.216393 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af522f17-3cad-4004-b112-51e47fa9fea7-logs\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.217444 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-config-data\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.218174 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4d2c0905-915e-4504-8454-ee3500220ab3-logs\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.218272 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4d2c0905-915e-4504-8454-ee3500220ab3-config-data\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.220108 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-scripts\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.224122 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-combined-ca-bundle\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.225435 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-secret-key\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.233489 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-horizon-secret-key\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.237929 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-combined-ca-bundle\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.238566 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/4d2c0905-915e-4504-8454-ee3500220ab3-horizon-tls-certs\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.239656 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjmfr\" (UniqueName: \"kubernetes.io/projected/af522f17-3cad-4004-b112-51e47fa9fea7-kube-api-access-wjmfr\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.240334 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmw2q\" (UniqueName: \"kubernetes.io/projected/4d2c0905-915e-4504-8454-ee3500220ab3-kube-api-access-jmw2q\") pod \"horizon-67dd67cb9b-9w4wk\" (UID: \"4d2c0905-915e-4504-8454-ee3500220ab3\") " pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.261489 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-tls-certs\") pod \"horizon-68bc7c4fc6-4mkmv\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.391922 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.539068 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.848913 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.927077 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-s9krd"] Jan 20 20:05:59 crc kubenswrapper[4948]: I0120 20:05:59.927359 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-s9krd" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" containerID="cri-o://10c220feebb03a65e036f269bbe8754201aacf46d58778445755d547aafd1795" gracePeriod=10 Jan 20 20:06:00 crc kubenswrapper[4948]: I0120 20:06:00.320894 4948 generic.go:334] "Generic (PLEG): container finished" podID="6a31f534-f99e-4471-a17f-4630288d7353" containerID="10c220feebb03a65e036f269bbe8754201aacf46d58778445755d547aafd1795" exitCode=0 Jan 20 20:06:00 crc kubenswrapper[4948]: I0120 20:06:00.321233 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-s9krd" event={"ID":"6a31f534-f99e-4471-a17f-4630288d7353","Type":"ContainerDied","Data":"10c220feebb03a65e036f269bbe8754201aacf46d58778445755d547aafd1795"} Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.218332 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-fdwn2" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.332839 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-fdwn2" event={"ID":"d96cb8cd-dfa3-4d70-af44-be9627945b5f","Type":"ContainerDied","Data":"de457b35af9759c6a88ff8065b022d29ab38b2e0f7b211d2f321e65f604a8b14"} Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.332895 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de457b35af9759c6a88ff8065b022d29ab38b2e0f7b211d2f321e65f604a8b14" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.332893 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-fdwn2" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.368321 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-db-sync-config-data\") pod \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.368434 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57xcx\" (UniqueName: \"kubernetes.io/projected/d96cb8cd-dfa3-4d70-af44-be9627945b5f-kube-api-access-57xcx\") pod \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.368462 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-combined-ca-bundle\") pod \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.368576 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-config-data\") pod \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\" (UID: \"d96cb8cd-dfa3-4d70-af44-be9627945b5f\") " Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.391701 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d96cb8cd-dfa3-4d70-af44-be9627945b5f" (UID: "d96cb8cd-dfa3-4d70-af44-be9627945b5f"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.391795 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d96cb8cd-dfa3-4d70-af44-be9627945b5f-kube-api-access-57xcx" (OuterVolumeSpecName: "kube-api-access-57xcx") pod "d96cb8cd-dfa3-4d70-af44-be9627945b5f" (UID: "d96cb8cd-dfa3-4d70-af44-be9627945b5f"). InnerVolumeSpecName "kube-api-access-57xcx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.400391 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d96cb8cd-dfa3-4d70-af44-be9627945b5f" (UID: "d96cb8cd-dfa3-4d70-af44-be9627945b5f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.435554 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-config-data" (OuterVolumeSpecName: "config-data") pod "d96cb8cd-dfa3-4d70-af44-be9627945b5f" (UID: "d96cb8cd-dfa3-4d70-af44-be9627945b5f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.470669 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.470728 4948 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.470744 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57xcx\" (UniqueName: \"kubernetes.io/projected/d96cb8cd-dfa3-4d70-af44-be9627945b5f-kube-api-access-57xcx\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:01 crc kubenswrapper[4948]: I0120 20:06:01.470756 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d96cb8cd-dfa3-4d70-af44-be9627945b5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.691360 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-l7hbz"] Jan 20 20:06:02 crc kubenswrapper[4948]: E0120 20:06:02.692810 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d96cb8cd-dfa3-4d70-af44-be9627945b5f" containerName="glance-db-sync" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.692940 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d96cb8cd-dfa3-4d70-af44-be9627945b5f" containerName="glance-db-sync" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.693256 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d96cb8cd-dfa3-4d70-af44-be9627945b5f" containerName="glance-db-sync" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.694561 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.717120 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-l7hbz"] Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.800370 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zv56b\" (UniqueName: \"kubernetes.io/projected/4c784c26-fcc8-47ae-a602-48d9a8faaa61-kube-api-access-zv56b\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.800485 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.800603 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.800650 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-config\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.800693 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.801041 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.902926 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zv56b\" (UniqueName: \"kubernetes.io/projected/4c784c26-fcc8-47ae-a602-48d9a8faaa61-kube-api-access-zv56b\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.903069 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.903189 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.903254 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-config\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.903299 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.903355 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.904112 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.904118 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.905667 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-config\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.906149 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.906171 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:02 crc kubenswrapper[4948]: I0120 20:06:02.953186 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zv56b\" (UniqueName: 
\"kubernetes.io/projected/4c784c26-fcc8-47ae-a602-48d9a8faaa61-kube-api-access-zv56b\") pod \"dnsmasq-dns-785d8bcb8c-l7hbz\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.047210 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.557353 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-s9krd" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: connect: connection refused" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.596261 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.598034 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.601199 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-96n9r" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.601547 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.603906 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.608590 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.855908 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.856327 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-scripts\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.856410 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-logs\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.856525 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.857059 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66vrv\" (UniqueName: 
\"kubernetes.io/projected/dad2f49d-a450-46ed-9d77-15cc21b04853-kube-api-access-66vrv\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.857117 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-config-data\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.857164 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.912126 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.913957 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.917817 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.934533 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.958758 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-logs\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.959742 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.959922 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66vrv\" (UniqueName: \"kubernetes.io/projected/dad2f49d-a450-46ed-9d77-15cc21b04853-kube-api-access-66vrv\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.960026 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-config-data\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.960178 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-httpd-run\") pod \"glance-default-external-api-0\" 
(UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.960971 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-logs\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.961126 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.961153 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.961222 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-scripts\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.961497 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.968158 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-scripts\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.969751 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-config-data\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.970139 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:03 crc kubenswrapper[4948]: I0120 20:06:03.990548 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66vrv\" (UniqueName: \"kubernetes.io/projected/dad2f49d-a450-46ed-9d77-15cc21b04853-kube-api-access-66vrv\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 
20:06:04.001991 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.063280 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.063357 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6j5dh\" (UniqueName: \"kubernetes.io/projected/b6093310-c438-49af-88b6-b14dd2a54a34-kube-api-access-6j5dh\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.063448 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.063508 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-logs\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.063567 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.063626 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.063668 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.165923 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 
20:06:04.166003 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6j5dh\" (UniqueName: \"kubernetes.io/projected/b6093310-c438-49af-88b6-b14dd2a54a34-kube-api-access-6j5dh\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.166035 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.166069 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-logs\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.166127 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.166209 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.166374 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.166621 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-logs\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.166912 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.166457 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.171372 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.171471 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.173777 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.190865 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6j5dh\" (UniqueName: \"kubernetes.io/projected/b6093310-c438-49af-88b6-b14dd2a54a34-kube-api-access-6j5dh\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.195452 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.234045 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:06:04 crc kubenswrapper[4948]: I0120 20:06:04.239669 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:05 crc kubenswrapper[4948]: I0120 20:06:05.501364 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:05 crc kubenswrapper[4948]: I0120 20:06:05.598835 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:08 crc kubenswrapper[4948]: I0120 20:06:08.559440 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-s9krd" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: connect: connection refused" Jan 20 20:06:10 crc kubenswrapper[4948]: E0120 20:06:10.458821 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 20 20:06:10 crc kubenswrapper[4948]: E0120 20:06:10.459412 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfch586hb7h54bh59hffh59fh648h54fh8bh676h577h7ch654h58dh6h65dh547hb8h68hf7hcchfdh64bh596h5d9h5fbhd9h87h9fh696h549q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tkqw9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-789494c67c-djqgh_openstack(152975f8-dda3-4343-8122-9d3506495970): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:06:10 crc kubenswrapper[4948]: E0120 20:06:10.476186 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-789494c67c-djqgh" podUID="152975f8-dda3-4343-8122-9d3506495970" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.580115 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.601515 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-m5tvw" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.601656 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-m5tvw" event={"ID":"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7","Type":"ContainerDied","Data":"6c0bd14abac4fb828bb9d5935b5f754c29928576bf685317f4221903188bef4d"} Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.601812 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c0bd14abac4fb828bb9d5935b5f754c29928576bf685317f4221903188bef4d" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.661826 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w8z8\" (UniqueName: \"kubernetes.io/projected/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-kube-api-access-2w8z8\") pod \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.662153 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-credential-keys\") pod \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.662253 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-config-data\") pod \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.662315 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-scripts\") pod \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.662428 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-combined-ca-bundle\") pod \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.663336 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-fernet-keys\") pod \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\" (UID: \"12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7\") " Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.669305 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" (UID: "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7"). 
InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.670566 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" (UID: "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.671980 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-kube-api-access-2w8z8" (OuterVolumeSpecName: "kube-api-access-2w8z8") pod "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" (UID: "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7"). InnerVolumeSpecName "kube-api-access-2w8z8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.694198 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-scripts" (OuterVolumeSpecName: "scripts") pod "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" (UID: "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.700325 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" (UID: "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.700441 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-config-data" (OuterVolumeSpecName: "config-data") pod "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" (UID: "12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.765948 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w8z8\" (UniqueName: \"kubernetes.io/projected/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-kube-api-access-2w8z8\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.765983 4948 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.765998 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.766010 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.766021 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:10 crc kubenswrapper[4948]: I0120 20:06:10.766030 4948 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.675782 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-m5tvw"] Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.683678 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-m5tvw"] Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.775872 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-hx7kj"] Jan 20 20:06:11 crc kubenswrapper[4948]: E0120 20:06:11.776243 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" containerName="keystone-bootstrap" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.776257 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" containerName="keystone-bootstrap" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.776436 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" containerName="keystone-bootstrap" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.776979 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.779203 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.779299 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.783536 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9zfkq" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.783629 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.785648 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.803928 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hx7kj"] Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.894463 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-credential-keys\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.894641 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-combined-ca-bundle\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.894683 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-fernet-keys\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.894761 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-scripts\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.894797 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sqgf\" (UniqueName: \"kubernetes.io/projected/c230d755-993f-4cc4-b387-992589975cc7-kube-api-access-5sqgf\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.894876 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-config-data\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.997019 4948 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-combined-ca-bundle\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.997080 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-fernet-keys\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.997143 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-scripts\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.997180 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sqgf\" (UniqueName: \"kubernetes.io/projected/c230d755-993f-4cc4-b387-992589975cc7-kube-api-access-5sqgf\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.997292 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-config-data\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:11 crc kubenswrapper[4948]: I0120 20:06:11.997346 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-credential-keys\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:12 crc kubenswrapper[4948]: I0120 20:06:12.003725 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-combined-ca-bundle\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:12 crc kubenswrapper[4948]: I0120 20:06:12.004024 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-credential-keys\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:12 crc kubenswrapper[4948]: I0120 20:06:12.004111 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-fernet-keys\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:12 crc kubenswrapper[4948]: I0120 20:06:12.011218 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-scripts\") pod \"keystone-bootstrap-hx7kj\" (UID: 
\"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:12 crc kubenswrapper[4948]: I0120 20:06:12.011812 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-config-data\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:12 crc kubenswrapper[4948]: I0120 20:06:12.022483 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sqgf\" (UniqueName: \"kubernetes.io/projected/c230d755-993f-4cc4-b387-992589975cc7-kube-api-access-5sqgf\") pod \"keystone-bootstrap-hx7kj\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:12 crc kubenswrapper[4948]: I0120 20:06:12.102491 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:12 crc kubenswrapper[4948]: I0120 20:06:12.591363 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7" path="/var/lib/kubelet/pods/12b8d1d4-7d24-42d2-b8ce-8188fb7b1ed7/volumes" Jan 20 20:06:13 crc kubenswrapper[4948]: I0120 20:06:13.557299 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-s9krd" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: connect: connection refused" Jan 20 20:06:13 crc kubenswrapper[4948]: I0120 20:06:13.557864 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:06:14 crc kubenswrapper[4948]: E0120 20:06:14.667810 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 20 20:06:14 crc kubenswrapper[4948]: E0120 20:06:14.668402 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n668h5dh5c6h59dh56dh5fh596h67bh5c9h59dh54hfch68bh86hbch86hd4h5ddhc6h595h645hb4hf5h57fh658h8fh6chbh558h55h66hbq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hzcv9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-57b75d5c69-bjxh7_openstack(c22039a6-695a-4abb-adcc-631c6703e03b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:06:14 crc kubenswrapper[4948]: E0120 20:06:14.670970 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-57b75d5c69-bjxh7" podUID="c22039a6-695a-4abb-adcc-631c6703e03b" Jan 20 20:06:14 crc kubenswrapper[4948]: E0120 20:06:14.684790 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Jan 20 20:06:14 crc kubenswrapper[4948]: E0120 20:06:14.685008 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd6h74h594h5b9h557h564h98h54dh58dh59bh66dh5bbh8fh56dh56dh5c9h655hcchc5h578hb5h56bh699h5h558h65fhb5h587h5d6hdchc5h697q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k2zfm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-68c9db4489-g8s2q_openstack(da0e1e1a-77ab-4d97-8d9f-fd081e462573): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 20 20:06:14 crc kubenswrapper[4948]: E0120 20:06:14.688964 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-68c9db4489-g8s2q" podUID="da0e1e1a-77ab-4d97-8d9f-fd081e462573"
Jan 20 20:06:22 crc kubenswrapper[4948]: I0120 20:06:22.940367 4948 generic.go:334] "Generic (PLEG): container finished" podID="c4d16876-ed2f-4186-801c-48d52e01ac8c" containerID="21db9b1a1206ebafe6b573d97de0bc3713a5845e199b0d2d20cdcbbab3f1796d" exitCode=0
Jan 20 20:06:22 crc kubenswrapper[4948]: I0120 20:06:22.940462 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5dp57" event={"ID":"c4d16876-ed2f-4186-801c-48d52e01ac8c","Type":"ContainerDied","Data":"21db9b1a1206ebafe6b573d97de0bc3713a5845e199b0d2d20cdcbbab3f1796d"}
Jan 20 20:06:23 crc kubenswrapper[4948]: I0120 20:06:23.602816 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-s9krd" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout"
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.480549 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-789494c67c-djqgh"
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.620370 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/152975f8-dda3-4343-8122-9d3506495970-logs\") pod \"152975f8-dda3-4343-8122-9d3506495970\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") "
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.620894 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-scripts\") pod \"152975f8-dda3-4343-8122-9d3506495970\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") "
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.621017 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-config-data\") pod \"152975f8-dda3-4343-8122-9d3506495970\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") "
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.621216 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/152975f8-dda3-4343-8122-9d3506495970-horizon-secret-key\") pod \"152975f8-dda3-4343-8122-9d3506495970\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") "
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.621262 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkqw9\" (UniqueName: \"kubernetes.io/projected/152975f8-dda3-4343-8122-9d3506495970-kube-api-access-tkqw9\") pod \"152975f8-dda3-4343-8122-9d3506495970\" (UID: \"152975f8-dda3-4343-8122-9d3506495970\") "
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.622197 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/152975f8-dda3-4343-8122-9d3506495970-logs" (OuterVolumeSpecName: "logs") pod "152975f8-dda3-4343-8122-9d3506495970" (UID: "152975f8-dda3-4343-8122-9d3506495970"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.623126 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-config-data" (OuterVolumeSpecName: "config-data") pod "152975f8-dda3-4343-8122-9d3506495970" (UID: "152975f8-dda3-4343-8122-9d3506495970"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.623640 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-scripts" (OuterVolumeSpecName: "scripts") pod "152975f8-dda3-4343-8122-9d3506495970" (UID: "152975f8-dda3-4343-8122-9d3506495970"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.627822 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/152975f8-dda3-4343-8122-9d3506495970-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "152975f8-dda3-4343-8122-9d3506495970" (UID: "152975f8-dda3-4343-8122-9d3506495970"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.628671 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/152975f8-dda3-4343-8122-9d3506495970-kube-api-access-tkqw9" (OuterVolumeSpecName: "kube-api-access-tkqw9") pod "152975f8-dda3-4343-8122-9d3506495970" (UID: "152975f8-dda3-4343-8122-9d3506495970"). InnerVolumeSpecName "kube-api-access-tkqw9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.725619 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.725652 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/152975f8-dda3-4343-8122-9d3506495970-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.725667 4948 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/152975f8-dda3-4343-8122-9d3506495970-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.725682 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkqw9\" (UniqueName: \"kubernetes.io/projected/152975f8-dda3-4343-8122-9d3506495970-kube-api-access-tkqw9\") on node \"crc\" DevicePath \"\""
Jan 20 20:06:27 crc kubenswrapper[4948]: I0120 20:06:27.725694 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/152975f8-dda3-4343-8122-9d3506495970-logs\") on node \"crc\" DevicePath \"\""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.002933 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-789494c67c-djqgh" event={"ID":"152975f8-dda3-4343-8122-9d3506495970","Type":"ContainerDied","Data":"14c56e68292228a33b8da3599738ce0b2ca540bf96b356d315805d077889916e"}
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.003024 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-789494c67c-djqgh"
Jan 20 20:06:28 crc kubenswrapper[4948]: E0120 20:06:28.026967 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified"
Jan 20 20:06:28 crc kubenswrapper[4948]: E0120 20:06:28.027141 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wn6js,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-qxsld_openstack(4a24a241-d8d2-484c-ae7b-436777e1fddd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 20 20:06:28 crc kubenswrapper[4948]: E0120 20:06:28.028687 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-qxsld" podUID="4a24a241-d8d2-484c-ae7b-436777e1fddd"
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.079305 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-789494c67c-djqgh"]
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.089881 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-789494c67c-djqgh"]
Jan 20 20:06:28 crc kubenswrapper[4948]: E0120 20:06:28.399011 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Jan 20 20:06:28 crc kubenswrapper[4948]: E0120 20:06:28.399549 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n99h79h68ch597hcch56bh5b6h56fh6fh56bh566h75h55h5f7h5cbh57ch5d8h5c7h7dh94h9fh5cfh696h68bh694h58bh67h69h8h575h596h56q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q4qf6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(6cf14434-5ac6-4983-8abe-7305b182c92d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.582970 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="152975f8-dda3-4343-8122-9d3506495970" path="/var/lib/kubelet/pods/152975f8-dda3-4343-8122-9d3506495970/volumes"
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.603192 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-s9krd" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout"
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.606278 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-s9krd"
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.622297 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5dp57"
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.661818 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57b75d5c69-bjxh7"
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.666251 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68c9db4489-g8s2q"
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.744766 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r64vw\" (UniqueName: \"kubernetes.io/projected/6a31f534-f99e-4471-a17f-4630288d7353-kube-api-access-r64vw\") pod \"6a31f534-f99e-4471-a17f-4630288d7353\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.744867 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-sb\") pod \"6a31f534-f99e-4471-a17f-4630288d7353\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.744897 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-nb\") pod \"6a31f534-f99e-4471-a17f-4630288d7353\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.745018 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-config\") pod \"6a31f534-f99e-4471-a17f-4630288d7353\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.745067 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-combined-ca-bundle\") pod \"c4d16876-ed2f-4186-801c-48d52e01ac8c\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.745117 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-config\") pod \"c4d16876-ed2f-4186-801c-48d52e01ac8c\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.745143 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-dns-svc\") pod \"6a31f534-f99e-4471-a17f-4630288d7353\" (UID: \"6a31f534-f99e-4471-a17f-4630288d7353\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.745163 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rhm8\" (UniqueName: \"kubernetes.io/projected/c4d16876-ed2f-4186-801c-48d52e01ac8c-kube-api-access-4rhm8\") pod \"c4d16876-ed2f-4186-801c-48d52e01ac8c\" (UID: \"c4d16876-ed2f-4186-801c-48d52e01ac8c\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.750688 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4d16876-ed2f-4186-801c-48d52e01ac8c-kube-api-access-4rhm8" (OuterVolumeSpecName: "kube-api-access-4rhm8") pod "c4d16876-ed2f-4186-801c-48d52e01ac8c" (UID: "c4d16876-ed2f-4186-801c-48d52e01ac8c"). InnerVolumeSpecName "kube-api-access-4rhm8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.752568 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a31f534-f99e-4471-a17f-4630288d7353-kube-api-access-r64vw" (OuterVolumeSpecName: "kube-api-access-r64vw") pod "6a31f534-f99e-4471-a17f-4630288d7353" (UID: "6a31f534-f99e-4471-a17f-4630288d7353"). InnerVolumeSpecName "kube-api-access-r64vw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.776980 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-config" (OuterVolumeSpecName: "config") pod "c4d16876-ed2f-4186-801c-48d52e01ac8c" (UID: "c4d16876-ed2f-4186-801c-48d52e01ac8c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.797289 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6a31f534-f99e-4471-a17f-4630288d7353" (UID: "6a31f534-f99e-4471-a17f-4630288d7353"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.803328 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-config" (OuterVolumeSpecName: "config") pod "6a31f534-f99e-4471-a17f-4630288d7353" (UID: "6a31f534-f99e-4471-a17f-4630288d7353"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.805953 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c4d16876-ed2f-4186-801c-48d52e01ac8c" (UID: "c4d16876-ed2f-4186-801c-48d52e01ac8c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.814974 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6a31f534-f99e-4471-a17f-4630288d7353" (UID: "6a31f534-f99e-4471-a17f-4630288d7353"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.815672 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6a31f534-f99e-4471-a17f-4630288d7353" (UID: "6a31f534-f99e-4471-a17f-4630288d7353"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846246 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-config-data\") pod \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846318 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzcv9\" (UniqueName: \"kubernetes.io/projected/c22039a6-695a-4abb-adcc-631c6703e03b-kube-api-access-hzcv9\") pod \"c22039a6-695a-4abb-adcc-631c6703e03b\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846369 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2zfm\" (UniqueName: \"kubernetes.io/projected/da0e1e1a-77ab-4d97-8d9f-fd081e462573-kube-api-access-k2zfm\") pod \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846502 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c22039a6-695a-4abb-adcc-631c6703e03b-logs\") pod \"c22039a6-695a-4abb-adcc-631c6703e03b\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846528 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-scripts\") pod \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846565 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-config-data\") pod \"c22039a6-695a-4abb-adcc-631c6703e03b\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846619 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-scripts\") pod \"c22039a6-695a-4abb-adcc-631c6703e03b\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846680 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0e1e1a-77ab-4d97-8d9f-fd081e462573-logs\") pod \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846807 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c22039a6-695a-4abb-adcc-631c6703e03b-horizon-secret-key\") pod \"c22039a6-695a-4abb-adcc-631c6703e03b\" (UID: \"c22039a6-695a-4abb-adcc-631c6703e03b\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.846841 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da0e1e1a-77ab-4d97-8d9f-fd081e462573-horizon-secret-key\") pod \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\" (UID: \"da0e1e1a-77ab-4d97-8d9f-fd081e462573\") "
Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847069 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-scripts" (OuterVolumeSpecName: "scripts") pod "da0e1e1a-77ab-4d97-8d9f-fd081e462573" (UID: "da0e1e1a-77ab-4d97-8d9f-fd081e462573"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847223 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-config-data" (OuterVolumeSpecName: "config-data") pod "da0e1e1a-77ab-4d97-8d9f-fd081e462573" (UID: "da0e1e1a-77ab-4d97-8d9f-fd081e462573"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847319 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c22039a6-695a-4abb-adcc-631c6703e03b-logs" (OuterVolumeSpecName: "logs") pod "c22039a6-695a-4abb-adcc-631c6703e03b" (UID: "c22039a6-695a-4abb-adcc-631c6703e03b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847398 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847416 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847426 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847437 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/c4d16876-ed2f-4186-801c-48d52e01ac8c-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847445 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847455 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rhm8\" (UniqueName: \"kubernetes.io/projected/c4d16876-ed2f-4186-801c-48d52e01ac8c-kube-api-access-4rhm8\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847464 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r64vw\" (UniqueName: \"kubernetes.io/projected/6a31f534-f99e-4471-a17f-4630288d7353-kube-api-access-r64vw\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847472 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da0e1e1a-77ab-4d97-8d9f-fd081e462573-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847480 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847490 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a31f534-f99e-4471-a17f-4630288d7353-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.847544 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da0e1e1a-77ab-4d97-8d9f-fd081e462573-logs" (OuterVolumeSpecName: "logs") pod "da0e1e1a-77ab-4d97-8d9f-fd081e462573" (UID: "da0e1e1a-77ab-4d97-8d9f-fd081e462573"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.848029 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-scripts" (OuterVolumeSpecName: "scripts") pod "c22039a6-695a-4abb-adcc-631c6703e03b" (UID: "c22039a6-695a-4abb-adcc-631c6703e03b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.848113 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-config-data" (OuterVolumeSpecName: "config-data") pod "c22039a6-695a-4abb-adcc-631c6703e03b" (UID: "c22039a6-695a-4abb-adcc-631c6703e03b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.849045 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da0e1e1a-77ab-4d97-8d9f-fd081e462573-kube-api-access-k2zfm" (OuterVolumeSpecName: "kube-api-access-k2zfm") pod "da0e1e1a-77ab-4d97-8d9f-fd081e462573" (UID: "da0e1e1a-77ab-4d97-8d9f-fd081e462573"). InnerVolumeSpecName "kube-api-access-k2zfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.850376 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c22039a6-695a-4abb-adcc-631c6703e03b-kube-api-access-hzcv9" (OuterVolumeSpecName: "kube-api-access-hzcv9") pod "c22039a6-695a-4abb-adcc-631c6703e03b" (UID: "c22039a6-695a-4abb-adcc-631c6703e03b"). InnerVolumeSpecName "kube-api-access-hzcv9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.850853 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c22039a6-695a-4abb-adcc-631c6703e03b-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "c22039a6-695a-4abb-adcc-631c6703e03b" (UID: "c22039a6-695a-4abb-adcc-631c6703e03b"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.851144 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da0e1e1a-77ab-4d97-8d9f-fd081e462573-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "da0e1e1a-77ab-4d97-8d9f-fd081e462573" (UID: "da0e1e1a-77ab-4d97-8d9f-fd081e462573"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.949354 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzcv9\" (UniqueName: \"kubernetes.io/projected/c22039a6-695a-4abb-adcc-631c6703e03b-kube-api-access-hzcv9\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.949391 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2zfm\" (UniqueName: \"kubernetes.io/projected/da0e1e1a-77ab-4d97-8d9f-fd081e462573-kube-api-access-k2zfm\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.949401 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c22039a6-695a-4abb-adcc-631c6703e03b-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.949411 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.949420 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c22039a6-695a-4abb-adcc-631c6703e03b-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.949428 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0e1e1a-77ab-4d97-8d9f-fd081e462573-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.949436 4948 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c22039a6-695a-4abb-adcc-631c6703e03b-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:28 crc kubenswrapper[4948]: I0120 20:06:28.949446 4948 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/da0e1e1a-77ab-4d97-8d9f-fd081e462573-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.017838 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-57b75d5c69-bjxh7" event={"ID":"c22039a6-695a-4abb-adcc-631c6703e03b","Type":"ContainerDied","Data":"56ee7b8bf7c51d80a97d1a39d9a94847ca8f1a460217b0f3fc9f6a5928150ae3"} Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.018044 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-57b75d5c69-bjxh7" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.021849 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68c9db4489-g8s2q" event={"ID":"da0e1e1a-77ab-4d97-8d9f-fd081e462573","Type":"ContainerDied","Data":"36a4993a93dd195779b7b00cfd0ee148a334671f26f63c774f9f9fac8d5131a4"} Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.022014 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-68c9db4489-g8s2q" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.024040 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-5dp57" event={"ID":"c4d16876-ed2f-4186-801c-48d52e01ac8c","Type":"ContainerDied","Data":"383f92f19d7afddd162a3e8475b64cbd386d1b4a1adf021f608896faa7f45529"} Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.024081 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="383f92f19d7afddd162a3e8475b64cbd386d1b4a1adf021f608896faa7f45529" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.024111 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-5dp57" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.028017 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-s9krd" event={"ID":"6a31f534-f99e-4471-a17f-4630288d7353","Type":"ContainerDied","Data":"891a6bfe2dbdf40e170ff948217ed9033207f2476224f6e4044bee867744df2c"} Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.028062 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-s9krd" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.028083 4948 scope.go:117] "RemoveContainer" containerID="10c220feebb03a65e036f269bbe8754201aacf46d58778445755d547aafd1795" Jan 20 20:06:29 crc kubenswrapper[4948]: E0120 20:06:29.036975 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-qxsld" podUID="4a24a241-d8d2-484c-ae7b-436777e1fddd" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.170805 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-57b75d5c69-bjxh7"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.179446 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-57b75d5c69-bjxh7"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.203251 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-s9krd"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.214957 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-s9krd"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.234172 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68c9db4489-g8s2q"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.243885 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-68c9db4489-g8s2q"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.830925 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-l7hbz"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.910657 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qvbf9"] Jan 20 20:06:29 crc kubenswrapper[4948]: E0120 20:06:29.912645 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="init" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.912865 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="init" Jan 20 20:06:29 crc 
kubenswrapper[4948]: E0120 20:06:29.913062 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4d16876-ed2f-4186-801c-48d52e01ac8c" containerName="neutron-db-sync" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.913163 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4d16876-ed2f-4186-801c-48d52e01ac8c" containerName="neutron-db-sync" Jan 20 20:06:29 crc kubenswrapper[4948]: E0120 20:06:29.913258 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.913338 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.913943 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.914086 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4d16876-ed2f-4186-801c-48d52e01ac8c" containerName="neutron-db-sync" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.925761 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.941457 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qvbf9"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.956846 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5656668848-wwxxb"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.958625 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.967521 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.967808 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.968002 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-r9l27" Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.968550 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5656668848-wwxxb"] Jan 20 20:06:29 crc kubenswrapper[4948]: I0120 20:06:29.968906 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083643 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-config\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083691 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-ovndb-tls-certs\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083762 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g5hr\" (UniqueName: \"kubernetes.io/projected/168fa071-a608-4772-8013-f0fee67843a4-kube-api-access-4g5hr\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083821 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083846 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-config\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083882 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-httpd-config\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083924 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q95jl\" (UniqueName: 
\"kubernetes.io/projected/40932965-aaf9-44be-8d0e-23a7cba8f60a-kube-api-access-q95jl\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083947 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083962 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-combined-ca-bundle\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.083985 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.084010 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.185909 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-config\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.185965 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-ovndb-tls-certs\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.185995 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g5hr\" (UniqueName: \"kubernetes.io/projected/168fa071-a608-4772-8013-f0fee67843a4-kube-api-access-4g5hr\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.186055 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.186078 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-config\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.186107 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-httpd-config\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.186160 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q95jl\" (UniqueName: \"kubernetes.io/projected/40932965-aaf9-44be-8d0e-23a7cba8f60a-kube-api-access-q95jl\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.186183 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.186208 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-combined-ca-bundle\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.186238 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.186272 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.188473 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.188945 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-svc\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.189191 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-sb\") pod 
\"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.189213 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-config\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.193095 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-httpd-config\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.203065 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-config\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.209175 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g5hr\" (UniqueName: \"kubernetes.io/projected/168fa071-a608-4772-8013-f0fee67843a4-kube-api-access-4g5hr\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.210881 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-ovndb-tls-certs\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.211638 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-combined-ca-bundle\") pod \"neutron-5656668848-wwxxb\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") " pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.211829 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.219408 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q95jl\" (UniqueName: \"kubernetes.io/projected/40932965-aaf9-44be-8d0e-23a7cba8f60a-kube-api-access-q95jl\") pod \"dnsmasq-dns-55f844cf75-qvbf9\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.250560 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.288120 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.579023 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a31f534-f99e-4471-a17f-4630288d7353" path="/var/lib/kubelet/pods/6a31f534-f99e-4471-a17f-4630288d7353/volumes" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.579694 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c22039a6-695a-4abb-adcc-631c6703e03b" path="/var/lib/kubelet/pods/c22039a6-695a-4abb-adcc-631c6703e03b/volumes" Jan 20 20:06:30 crc kubenswrapper[4948]: I0120 20:06:30.580585 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da0e1e1a-77ab-4d97-8d9f-fd081e462573" path="/var/lib/kubelet/pods/da0e1e1a-77ab-4d97-8d9f-fd081e462573/volumes" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.035584 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-79d47bbd4f-rpj54"] Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.037430 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.041258 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.041489 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.058635 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-79d47bbd4f-rpj54"] Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.225205 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-combined-ca-bundle\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.225266 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-public-tls-certs\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.225301 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msllw\" (UniqueName: \"kubernetes.io/projected/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-kube-api-access-msllw\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.225414 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-httpd-config\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.225452 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-ovndb-tls-certs\") pod 
\"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.225521 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-config\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.225568 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-internal-tls-certs\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.327212 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-config\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.327581 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-internal-tls-certs\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.327765 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-combined-ca-bundle\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.327866 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-public-tls-certs\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.327962 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msllw\" (UniqueName: \"kubernetes.io/projected/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-kube-api-access-msllw\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.328080 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-httpd-config\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.328175 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-ovndb-tls-certs\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " 
pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.337471 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-internal-tls-certs\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.342491 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-public-tls-certs\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.352498 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-config\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.357481 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-ovndb-tls-certs\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.357588 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-httpd-config\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.360562 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-combined-ca-bundle\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.361901 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msllw\" (UniqueName: \"kubernetes.io/projected/4005ab42-8a7a-4951-ba75-b1f7a3d2a063-kube-api-access-msllw\") pod \"neutron-79d47bbd4f-rpj54\" (UID: \"4005ab42-8a7a-4951-ba75-b1f7a3d2a063\") " pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:32 crc kubenswrapper[4948]: E0120 20:06:32.618519 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 20 20:06:32 crc kubenswrapper[4948]: E0120 20:06:32.619244 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
Jan 20 20:06:32 crc kubenswrapper[4948]: E0120 20:06:32.619244 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gk68v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-dchk5_openstack(974e456e-61d1-4c5e-a8c9-9ebbb5246848): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 20 20:06:32 crc kubenswrapper[4948]: E0120 20:06:32.621802 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-dchk5" podUID="974e456e-61d1-4c5e-a8c9-9ebbb5246848"
Jan 20 20:06:32 crc kubenswrapper[4948]: I0120 20:06:32.646256 4948 scope.go:117] "RemoveContainer" containerID="27137d022dd88abfc6ff794f1a1c3042741eab6ed11987f0c2beb7e54518d22b"
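After the ErrImagePull sync failure above, the kubelet does not retry the pull immediately: by 20:06:33 (below) the same container is already reported with ImagePullBackOff. A rough sketch of the doubling back-off schedule involved, assuming the commonly cited defaults of a 10s initial delay capped at 5m; the real kubelet tracks this per image via its internal back-off bookkeeping, not this helper:

```go
package main

import (
	"fmt"
	"time"
)

// pullBackoff returns an illustrative delay before retry attempt n,
// doubling from an initial delay up to a cap (assumed 10s..5m defaults).
func pullBackoff(n int, initial, max time.Duration) time.Duration {
	d := initial
	for i := 0; i < n; i++ {
		d *= 2
		if d >= max {
			return max
		}
	}
	return d
}

func main() {
	for n := 0; n < 6; n++ {
		fmt.Printf("retry %d after %v\n", n, pullBackoff(n, 10*time.Second, 5*time.Minute))
	}
	// retry 0 after 10s ... retry 5 after 5m0s
}
```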
Need to start a new one" pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.079297 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-99f6n" event={"ID":"0fa00dfc-b064-4964-a65d-80809492c96d","Type":"ContainerStarted","Data":"41b9099addc835da529df8f16b3a0f3f4ac28f84f9ca1ab4cb080c170810471b"} Jan 20 20:06:33 crc kubenswrapper[4948]: E0120 20:06:33.105970 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-dchk5" podUID="974e456e-61d1-4c5e-a8c9-9ebbb5246848" Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.116185 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-99f6n" podStartSLOduration=7.997892373 podStartE2EDuration="45.116161265s" podCreationTimestamp="2026-01-20 20:05:48 +0000 UTC" firstStartedPulling="2026-01-20 20:05:51.324653134 +0000 UTC m=+979.275378103" lastFinishedPulling="2026-01-20 20:06:28.442922026 +0000 UTC m=+1016.393646995" observedRunningTime="2026-01-20 20:06:33.106196233 +0000 UTC m=+1021.056921202" watchObservedRunningTime="2026-01-20 20:06:33.116161265 +0000 UTC m=+1021.066886234" Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.133625 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67dd67cb9b-9w4wk"] Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.423935 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-l7hbz"] Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.436188 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-68bc7c4fc6-4mkmv"] Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.590229 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.603344 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-s9krd" podUID="6a31f534-f99e-4471-a17f-4630288d7353" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Jan 20 20:06:33 crc kubenswrapper[4948]: W0120 20:06:33.622572 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddad2f49d_a450_46ed_9d77_15cc21b04853.slice/crio-6d5d7a2081807480cbd7dea602737d2d78aa4d732ff28f189521aee750183de4 WatchSource:0}: Error finding container 6d5d7a2081807480cbd7dea602737d2d78aa4d732ff28f189521aee750183de4: Status 404 returned error can't find the container with id 6d5d7a2081807480cbd7dea602737d2d78aa4d732ff28f189521aee750183de4 Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.662212 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qvbf9"] Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.683800 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hx7kj"] Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.692505 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.782570 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-79d47bbd4f-rpj54"] Jan 20 20:06:33 
Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.820746 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 20 20:06:33 crc kubenswrapper[4948]: I0120 20:06:33.906785 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5656668848-wwxxb"]
Jan 20 20:06:33 crc kubenswrapper[4948]: W0120 20:06:33.959261 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod168fa071_a608_4772_8013_f0fee67843a4.slice/crio-8e5897fc437e203533acffdee71fddb47611dfebec0c8653e74cf221d85bd0e4 WatchSource:0}: Error finding container 8e5897fc437e203533acffdee71fddb47611dfebec0c8653e74cf221d85bd0e4: Status 404 returned error can't find the container with id 8e5897fc437e203533acffdee71fddb47611dfebec0c8653e74cf221d85bd0e4
Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.123888 4948 generic.go:334] "Generic (PLEG): container finished" podID="4c784c26-fcc8-47ae-a602-48d9a8faaa61" containerID="b226b1b47eeafe597693786cf6e264edd1e60acff7f2ade8afc3e0d6ce4e1b2a" exitCode=0
Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.123979 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" event={"ID":"4c784c26-fcc8-47ae-a602-48d9a8faaa61","Type":"ContainerDied","Data":"b226b1b47eeafe597693786cf6e264edd1e60acff7f2ade8afc3e0d6ce4e1b2a"}
Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.124014 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" event={"ID":"4c784c26-fcc8-47ae-a602-48d9a8faaa61","Type":"ContainerStarted","Data":"52def139707cd13624689b39d7e19eec60054666bb5f23372407f605990e42d2"}
Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.136460 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5656668848-wwxxb" event={"ID":"168fa071-a608-4772-8013-f0fee67843a4","Type":"ContainerStarted","Data":"8e5897fc437e203533acffdee71fddb47611dfebec0c8653e74cf221d85bd0e4"}
Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.141116 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b6093310-c438-49af-88b6-b14dd2a54a34","Type":"ContainerStarted","Data":"a8db43b7a3b64e0bf24e1317d82a08334136f0d4d66a60a4d1cc5ce10f39b40e"}
Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.173109 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dd67cb9b-9w4wk" event={"ID":"4d2c0905-915e-4504-8454-ee3500220ab3","Type":"ContainerStarted","Data":"d9ba582105d9aba85e85ead75db83d9e35dc5e0b32470039eaef9f3abdb20921"}
Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.184778 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dad2f49d-a450-46ed-9d77-15cc21b04853","Type":"ContainerStarted","Data":"6d5d7a2081807480cbd7dea602737d2d78aa4d732ff28f189521aee750183de4"}
Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.186443 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerStarted","Data":"d06b8f94f0291b54cfb083803fd5b146b483e1fab43f2786bc947a6f421aca66"}
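The "Generic (PLEG): container finished" / "SyncLoop (PLEG): event for pod" pairs above are the Pod Lifecycle Event Generator at work: a relist compares each container's current state against the previous snapshot and emits ContainerStarted/ContainerDied events that the sync loop then dispatches. A toy sketch of that diffing step; the types and the diff function are simplified stand-ins for the real pleg package, keyed here by shortened container IDs from the log:

```go
package main

import "fmt"

type state string

const (
	running state = "running"
	exited  state = "exited"
)

type event struct {
	ID   string // container ID
	Type string // "ContainerStarted" or "ContainerDied"
}

// diff compares two relist snapshots (container ID -> state) and emits
// the lifecycle events a PLEG-style relist would generate.
func diff(prev, curr map[string]state) []event {
	var evs []event
	for id, s := range curr {
		switch {
		case s == running && prev[id] != running:
			evs = append(evs, event{id, "ContainerStarted"})
		case s == exited && prev[id] == running:
			evs = append(evs, event{id, "ContainerDied"})
		}
	}
	return evs
}

func main() {
	prev := map[string]state{"b226b1b4": running}
	curr := map[string]state{"b226b1b4": exited, "52def139": running}
	fmt.Println(diff(prev, curr)) // order depends on map iteration
}
```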
event={"ID":"40932965-aaf9-44be-8d0e-23a7cba8f60a","Type":"ContainerStarted","Data":"6c2186b11676105a97b7c5433ddbb1b6b055f8bd023af00fb3e110e43e945db6"} Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.211653 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hx7kj" event={"ID":"c230d755-993f-4cc4-b387-992589975cc7","Type":"ContainerStarted","Data":"249ccbc6ee7c339d5d8bb4c43c4a6cff0720ca898fb38f3ffbbdcb7423977c33"} Jan 20 20:06:34 crc kubenswrapper[4948]: I0120 20:06:34.215835 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79d47bbd4f-rpj54" event={"ID":"4005ab42-8a7a-4951-ba75-b1f7a3d2a063","Type":"ContainerStarted","Data":"5b30d84165c329b0763e921912bec9ee444b66e8e6ad5f909f3f8255e15be586"} Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:34.999987 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.127597 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-svc\") pod \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.128008 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-sb\") pod \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.128190 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-nb\") pod \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.128220 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-swift-storage-0\") pod \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.128285 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zv56b\" (UniqueName: \"kubernetes.io/projected/4c784c26-fcc8-47ae-a602-48d9a8faaa61-kube-api-access-zv56b\") pod \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.128313 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-config\") pod \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\" (UID: \"4c784c26-fcc8-47ae-a602-48d9a8faaa61\") " Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.147344 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c784c26-fcc8-47ae-a602-48d9a8faaa61-kube-api-access-zv56b" (OuterVolumeSpecName: "kube-api-access-zv56b") pod "4c784c26-fcc8-47ae-a602-48d9a8faaa61" (UID: "4c784c26-fcc8-47ae-a602-48d9a8faaa61"). InnerVolumeSpecName "kube-api-access-zv56b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.232515 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zv56b\" (UniqueName: \"kubernetes.io/projected/4c784c26-fcc8-47ae-a602-48d9a8faaa61-kube-api-access-zv56b\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.234343 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79d47bbd4f-rpj54" event={"ID":"4005ab42-8a7a-4951-ba75-b1f7a3d2a063","Type":"ContainerStarted","Data":"136d24a824946275a0c296bed68f1ea25118b783da31c99e1ccf6e311abe2d8a"} Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.237647 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b6093310-c438-49af-88b6-b14dd2a54a34","Type":"ContainerStarted","Data":"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de"} Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.243044 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" event={"ID":"4c784c26-fcc8-47ae-a602-48d9a8faaa61","Type":"ContainerDied","Data":"52def139707cd13624689b39d7e19eec60054666bb5f23372407f605990e42d2"} Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.243122 4948 scope.go:117] "RemoveContainer" containerID="b226b1b47eeafe597693786cf6e264edd1e60acff7f2ade8afc3e0d6ce4e1b2a" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.243243 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-l7hbz" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.249595 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dd67cb9b-9w4wk" event={"ID":"4d2c0905-915e-4504-8454-ee3500220ab3","Type":"ContainerStarted","Data":"ef1f007d7fc5614411ba8e3e8c49bdc7953f1d70362f0a93f297b8abf847f7ae"} Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.254337 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dad2f49d-a450-46ed-9d77-15cc21b04853","Type":"ContainerStarted","Data":"8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399"} Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.259818 4948 generic.go:334] "Generic (PLEG): container finished" podID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerID="d592504d8c0a6f9a38e08f7fe6cb01a68ac263f89b75bd519dd5859a5418ae56" exitCode=0 Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.259870 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" event={"ID":"40932965-aaf9-44be-8d0e-23a7cba8f60a","Type":"ContainerDied","Data":"d592504d8c0a6f9a38e08f7fe6cb01a68ac263f89b75bd519dd5859a5418ae56"} Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.330984 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4c784c26-fcc8-47ae-a602-48d9a8faaa61" (UID: "4c784c26-fcc8-47ae-a602-48d9a8faaa61"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.332367 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4c784c26-fcc8-47ae-a602-48d9a8faaa61" (UID: "4c784c26-fcc8-47ae-a602-48d9a8faaa61"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.345621 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4c784c26-fcc8-47ae-a602-48d9a8faaa61" (UID: "4c784c26-fcc8-47ae-a602-48d9a8faaa61"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.350532 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-config" (OuterVolumeSpecName: "config") pod "4c784c26-fcc8-47ae-a602-48d9a8faaa61" (UID: "4c784c26-fcc8-47ae-a602-48d9a8faaa61"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.351253 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.351267 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.351276 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.351284 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.363284 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4c784c26-fcc8-47ae-a602-48d9a8faaa61" (UID: "4c784c26-fcc8-47ae-a602-48d9a8faaa61"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.453977 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4c784c26-fcc8-47ae-a602-48d9a8faaa61-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.830788 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-l7hbz"] Jan 20 20:06:35 crc kubenswrapper[4948]: I0120 20:06:35.846623 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-l7hbz"] Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.288205 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" event={"ID":"40932965-aaf9-44be-8d0e-23a7cba8f60a","Type":"ContainerStarted","Data":"7f7e235466d04e56bb30af71494aca05f50c25feea4f98a3876fbdb6429db220"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.289430 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.313555 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b6093310-c438-49af-88b6-b14dd2a54a34","Type":"ContainerStarted","Data":"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.313755 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" containerName="glance-log" containerID="cri-o://e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de" gracePeriod=30 Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.313695 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" podStartSLOduration=7.313665833 podStartE2EDuration="7.313665833s" podCreationTimestamp="2026-01-20 20:06:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:36.312069658 +0000 UTC m=+1024.262794627" watchObservedRunningTime="2026-01-20 20:06:36.313665833 +0000 UTC m=+1024.264390802" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.314094 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" containerName="glance-httpd" containerID="cri-o://3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4" gracePeriod=30 Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.383442 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dd67cb9b-9w4wk" event={"ID":"4d2c0905-915e-4504-8454-ee3500220ab3","Type":"ContainerStarted","Data":"08d9c3660e3ecd0832afba6cf5911a8e8427e7bed01955d0e134ac074a19a3f1"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.387942 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=34.387924743 podStartE2EDuration="34.387924743s" podCreationTimestamp="2026-01-20 20:06:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:36.373489625 +0000 UTC m=+1024.324214594" 
watchObservedRunningTime="2026-01-20 20:06:36.387924743 +0000 UTC m=+1024.338649712" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.433130 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5656668848-wwxxb" event={"ID":"168fa071-a608-4772-8013-f0fee67843a4","Type":"ContainerStarted","Data":"7124509677e848ae63f0a0e9b27eb09c2c49e5b152c91392048787b8ee7f6820"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.433179 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5656668848-wwxxb" event={"ID":"168fa071-a608-4772-8013-f0fee67843a4","Type":"ContainerStarted","Data":"c55ffc95d603f995af1d5ccf5e770b53298103459d5435f8224252f2a6bec3ae"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.441225 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.471358 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerStarted","Data":"6adfd927e96ecfa6c7b6a841fa85196a4b50ebb518e1b96beb40195708ccb40c"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.507688 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hx7kj" event={"ID":"c230d755-993f-4cc4-b387-992589975cc7","Type":"ContainerStarted","Data":"5c8cff267eece054abb0bed6f832e21378d67433d0359d0efa0a1e57c0898ede"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.538319 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-67dd67cb9b-9w4wk" podStartSLOduration=36.84180697 podStartE2EDuration="37.538289555s" podCreationTimestamp="2026-01-20 20:05:59 +0000 UTC" firstStartedPulling="2026-01-20 20:06:33.16052332 +0000 UTC m=+1021.111248289" lastFinishedPulling="2026-01-20 20:06:33.857005905 +0000 UTC m=+1021.807730874" observedRunningTime="2026-01-20 20:06:36.422075209 +0000 UTC m=+1024.372800178" watchObservedRunningTime="2026-01-20 20:06:36.538289555 +0000 UTC m=+1024.489014524" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.540758 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5656668848-wwxxb" podStartSLOduration=7.540744274 podStartE2EDuration="7.540744274s" podCreationTimestamp="2026-01-20 20:06:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:36.490255747 +0000 UTC m=+1024.440980736" watchObservedRunningTime="2026-01-20 20:06:36.540744274 +0000 UTC m=+1024.491469253" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.548114 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6cf14434-5ac6-4983-8abe-7305b182c92d","Type":"ContainerStarted","Data":"c7008d934d23533401eb78ae14168e519b7174e79007eb1e219bd4edca5be4ef"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.550129 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-79d47bbd4f-rpj54" event={"ID":"4005ab42-8a7a-4951-ba75-b1f7a3d2a063","Type":"ContainerStarted","Data":"10d251eb828554b55f22ebbd66acfe321f2ce85548bd3d6010af9035faaa1ae4"} Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.551035 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.568805 4948 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-hx7kj" podStartSLOduration=25.568729606 podStartE2EDuration="25.568729606s" podCreationTimestamp="2026-01-20 20:06:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:36.566028349 +0000 UTC m=+1024.516753318" watchObservedRunningTime="2026-01-20 20:06:36.568729606 +0000 UTC m=+1024.519454595" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.583729 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c784c26-fcc8-47ae-a602-48d9a8faaa61" path="/var/lib/kubelet/pods/4c784c26-fcc8-47ae-a602-48d9a8faaa61/volumes" Jan 20 20:06:36 crc kubenswrapper[4948]: I0120 20:06:36.592405 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-79d47bbd4f-rpj54" podStartSLOduration=4.592384555 podStartE2EDuration="4.592384555s" podCreationTimestamp="2026-01-20 20:06:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:36.591056597 +0000 UTC m=+1024.541781566" watchObservedRunningTime="2026-01-20 20:06:36.592384555 +0000 UTC m=+1024.543109524" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.107592 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.204256 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-logs\") pod \"b6093310-c438-49af-88b6-b14dd2a54a34\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.204311 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-config-data\") pod \"b6093310-c438-49af-88b6-b14dd2a54a34\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.204340 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-combined-ca-bundle\") pod \"b6093310-c438-49af-88b6-b14dd2a54a34\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.204538 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-httpd-run\") pod \"b6093310-c438-49af-88b6-b14dd2a54a34\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.204555 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"b6093310-c438-49af-88b6-b14dd2a54a34\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.204588 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6j5dh\" (UniqueName: \"kubernetes.io/projected/b6093310-c438-49af-88b6-b14dd2a54a34-kube-api-access-6j5dh\") pod \"b6093310-c438-49af-88b6-b14dd2a54a34\" (UID: 
\"b6093310-c438-49af-88b6-b14dd2a54a34\") " Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.204620 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-scripts\") pod \"b6093310-c438-49af-88b6-b14dd2a54a34\" (UID: \"b6093310-c438-49af-88b6-b14dd2a54a34\") " Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.206204 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-logs" (OuterVolumeSpecName: "logs") pod "b6093310-c438-49af-88b6-b14dd2a54a34" (UID: "b6093310-c438-49af-88b6-b14dd2a54a34"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.209244 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b6093310-c438-49af-88b6-b14dd2a54a34" (UID: "b6093310-c438-49af-88b6-b14dd2a54a34"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.220936 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "b6093310-c438-49af-88b6-b14dd2a54a34" (UID: "b6093310-c438-49af-88b6-b14dd2a54a34"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.230696 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6093310-c438-49af-88b6-b14dd2a54a34-kube-api-access-6j5dh" (OuterVolumeSpecName: "kube-api-access-6j5dh") pod "b6093310-c438-49af-88b6-b14dd2a54a34" (UID: "b6093310-c438-49af-88b6-b14dd2a54a34"). InnerVolumeSpecName "kube-api-access-6j5dh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.234290 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-scripts" (OuterVolumeSpecName: "scripts") pod "b6093310-c438-49af-88b6-b14dd2a54a34" (UID: "b6093310-c438-49af-88b6-b14dd2a54a34"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.262518 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6093310-c438-49af-88b6-b14dd2a54a34" (UID: "b6093310-c438-49af-88b6-b14dd2a54a34"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.288278 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-config-data" (OuterVolumeSpecName: "config-data") pod "b6093310-c438-49af-88b6-b14dd2a54a34" (UID: "b6093310-c438-49af-88b6-b14dd2a54a34"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.309061 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.309093 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.309102 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.309147 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.309157 4948 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b6093310-c438-49af-88b6-b14dd2a54a34-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.309167 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6j5dh\" (UniqueName: \"kubernetes.io/projected/b6093310-c438-49af-88b6-b14dd2a54a34-kube-api-access-6j5dh\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.309176 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6093310-c438-49af-88b6-b14dd2a54a34-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.356468 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.411253 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.581143 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dad2f49d-a450-46ed-9d77-15cc21b04853","Type":"ContainerStarted","Data":"37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c"} Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.581335 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerName="glance-log" containerID="cri-o://8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399" gracePeriod=30 Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.582302 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerName="glance-httpd" containerID="cri-o://37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c" gracePeriod=30 Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.591609 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" 
event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerStarted","Data":"3d0b58f79a4101a472c79a9066f937e017f54113f2910aa3d332331e863ecd0f"} Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.598548 4948 generic.go:334] "Generic (PLEG): container finished" podID="b6093310-c438-49af-88b6-b14dd2a54a34" containerID="3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4" exitCode=143 Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.598576 4948 generic.go:334] "Generic (PLEG): container finished" podID="b6093310-c438-49af-88b6-b14dd2a54a34" containerID="e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de" exitCode=143 Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.598878 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.598912 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b6093310-c438-49af-88b6-b14dd2a54a34","Type":"ContainerDied","Data":"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4"} Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.598960 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b6093310-c438-49af-88b6-b14dd2a54a34","Type":"ContainerDied","Data":"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de"} Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.598970 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b6093310-c438-49af-88b6-b14dd2a54a34","Type":"ContainerDied","Data":"a8db43b7a3b64e0bf24e1317d82a08334136f0d4d66a60a4d1cc5ce10f39b40e"} Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.598999 4948 scope.go:117] "RemoveContainer" containerID="3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.656613 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=35.656593218 podStartE2EDuration="35.656593218s" podCreationTimestamp="2026-01-20 20:06:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:37.645140115 +0000 UTC m=+1025.595865084" watchObservedRunningTime="2026-01-20 20:06:37.656593218 +0000 UTC m=+1025.607318177" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.683898 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.700585 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.728781 4948 scope.go:117] "RemoveContainer" containerID="e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.756414 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-68bc7c4fc6-4mkmv" podStartSLOduration=38.148455912 podStartE2EDuration="39.756393311s" podCreationTimestamp="2026-01-20 20:05:58 +0000 UTC" firstStartedPulling="2026-01-20 20:06:33.437942064 +0000 UTC m=+1021.388667033" lastFinishedPulling="2026-01-20 20:06:35.045879473 +0000 UTC m=+1022.996604432" observedRunningTime="2026-01-20 
20:06:37.714613389 +0000 UTC m=+1025.665338358" watchObservedRunningTime="2026-01-20 20:06:37.756393311 +0000 UTC m=+1025.707118280" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.773900 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:37 crc kubenswrapper[4948]: E0120 20:06:37.774448 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" containerName="glance-httpd" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.774536 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" containerName="glance-httpd" Jan 20 20:06:37 crc kubenswrapper[4948]: E0120 20:06:37.774633 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" containerName="glance-log" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.774723 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" containerName="glance-log" Jan 20 20:06:37 crc kubenswrapper[4948]: E0120 20:06:37.774840 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c784c26-fcc8-47ae-a602-48d9a8faaa61" containerName="init" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.774916 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c784c26-fcc8-47ae-a602-48d9a8faaa61" containerName="init" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.775190 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" containerName="glance-log" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.775291 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" containerName="glance-httpd" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.775376 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c784c26-fcc8-47ae-a602-48d9a8faaa61" containerName="init" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.775929 4948 scope.go:117] "RemoveContainer" containerID="3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4" Jan 20 20:06:37 crc kubenswrapper[4948]: E0120 20:06:37.779940 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4\": container with ID starting with 3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4 not found: ID does not exist" containerID="3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.779978 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4"} err="failed to get container status \"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4\": rpc error: code = NotFound desc = could not find container \"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4\": container with ID starting with 3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4 not found: ID does not exist" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.780012 4948 scope.go:117] "RemoveContainer" containerID="e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.780846 4948 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: E0120 20:06:37.786382 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de\": container with ID starting with e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de not found: ID does not exist" containerID="e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.786428 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de"} err="failed to get container status \"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de\": rpc error: code = NotFound desc = could not find container \"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de\": container with ID starting with e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de not found: ID does not exist" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.786464 4948 scope.go:117] "RemoveContainer" containerID="3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.787260 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4"} err="failed to get container status \"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4\": rpc error: code = NotFound desc = could not find container \"3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4\": container with ID starting with 3dd45158885d86a22eab844bed61ed195606ca25bb1f8d5d0a79a65ebe5f3fb4 not found: ID does not exist" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.787302 4948 scope.go:117] "RemoveContainer" containerID="e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.787536 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de"} err="failed to get container status \"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de\": rpc error: code = NotFound desc = could not find container \"e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de\": container with ID starting with e76c0850c21d3c43b55d42673db1afcb06df38f2a7ecd231dbb18af1cbdf12de not found: ID does not exist" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.802950 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.803347 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.819310 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.870419 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-scripts\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") 
" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.870788 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-logs\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.870963 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-config-data\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.871114 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.871347 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.871393 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.871544 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.871782 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7qxv\" (UniqueName: \"kubernetes.io/projected/249e6833-425e-4243-b1ca-6c1b78a752de-kube-api-access-t7qxv\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.974684 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-config-data\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.974767 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.974799 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.974816 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.974861 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.974881 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7qxv\" (UniqueName: \"kubernetes.io/projected/249e6833-425e-4243-b1ca-6c1b78a752de-kube-api-access-t7qxv\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.974949 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-scripts\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.974967 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-logs\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.975402 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-logs\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.975595 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.976050 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " 
pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.983396 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-scripts\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.988620 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:37 crc kubenswrapper[4948]: I0120 20:06:37.996991 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.010837 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-config-data\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.030514 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7qxv\" (UniqueName: \"kubernetes.io/projected/249e6833-425e-4243-b1ca-6c1b78a752de-kube-api-access-t7qxv\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.033808 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.124352 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.582215 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6093310-c438-49af-88b6-b14dd2a54a34" path="/var/lib/kubelet/pods/b6093310-c438-49af-88b6-b14dd2a54a34/volumes" Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.631780 4948 generic.go:334] "Generic (PLEG): container finished" podID="0fa00dfc-b064-4964-a65d-80809492c96d" containerID="41b9099addc835da529df8f16b3a0f3f4ac28f84f9ca1ab4cb080c170810471b" exitCode=0 Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.631914 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-99f6n" event={"ID":"0fa00dfc-b064-4964-a65d-80809492c96d","Type":"ContainerDied","Data":"41b9099addc835da529df8f16b3a0f3f4ac28f84f9ca1ab4cb080c170810471b"} Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.640921 4948 generic.go:334] "Generic (PLEG): container finished" podID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerID="8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399" exitCode=143 Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.643233 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dad2f49d-a450-46ed-9d77-15cc21b04853","Type":"ContainerDied","Data":"8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399"} Jan 20 20:06:38 crc kubenswrapper[4948]: I0120 20:06:38.853858 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.397215 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.397273 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.410548 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.540284 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.540618 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.615454 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-logs\") pod \"dad2f49d-a450-46ed-9d77-15cc21b04853\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.615501 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-scripts\") pod \"dad2f49d-a450-46ed-9d77-15cc21b04853\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.615536 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"dad2f49d-a450-46ed-9d77-15cc21b04853\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.615560 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-httpd-run\") pod \"dad2f49d-a450-46ed-9d77-15cc21b04853\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.615601 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-config-data\") pod \"dad2f49d-a450-46ed-9d77-15cc21b04853\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.615644 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-combined-ca-bundle\") pod \"dad2f49d-a450-46ed-9d77-15cc21b04853\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.615728 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66vrv\" (UniqueName: \"kubernetes.io/projected/dad2f49d-a450-46ed-9d77-15cc21b04853-kube-api-access-66vrv\") pod \"dad2f49d-a450-46ed-9d77-15cc21b04853\" (UID: \"dad2f49d-a450-46ed-9d77-15cc21b04853\") " Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.616024 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-logs" (OuterVolumeSpecName: "logs") pod "dad2f49d-a450-46ed-9d77-15cc21b04853" (UID: "dad2f49d-a450-46ed-9d77-15cc21b04853"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.616384 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.616514 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "dad2f49d-a450-46ed-9d77-15cc21b04853" (UID: "dad2f49d-a450-46ed-9d77-15cc21b04853"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.650012 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dad2f49d-a450-46ed-9d77-15cc21b04853-kube-api-access-66vrv" (OuterVolumeSpecName: "kube-api-access-66vrv") pod "dad2f49d-a450-46ed-9d77-15cc21b04853" (UID: "dad2f49d-a450-46ed-9d77-15cc21b04853"). InnerVolumeSpecName "kube-api-access-66vrv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.665624 4948 generic.go:334] "Generic (PLEG): container finished" podID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerID="37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c" exitCode=0 Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.665893 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dad2f49d-a450-46ed-9d77-15cc21b04853","Type":"ContainerDied","Data":"37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c"} Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.665998 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"dad2f49d-a450-46ed-9d77-15cc21b04853","Type":"ContainerDied","Data":"6d5d7a2081807480cbd7dea602737d2d78aa4d732ff28f189521aee750183de4"} Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.666084 4948 scope.go:117] "RemoveContainer" containerID="37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.666343 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.670516 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "dad2f49d-a450-46ed-9d77-15cc21b04853" (UID: "dad2f49d-a450-46ed-9d77-15cc21b04853"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.671662 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"249e6833-425e-4243-b1ca-6c1b78a752de","Type":"ContainerStarted","Data":"addc1331ceddb6f7d9a451e3c9646b19f3f21f22acd4b55db3e734991e66ce66"} Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.675980 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-scripts" (OuterVolumeSpecName: "scripts") pod "dad2f49d-a450-46ed-9d77-15cc21b04853" (UID: "dad2f49d-a450-46ed-9d77-15cc21b04853"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.687795 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dad2f49d-a450-46ed-9d77-15cc21b04853" (UID: "dad2f49d-a450-46ed-9d77-15cc21b04853"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.723658 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.725875 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66vrv\" (UniqueName: \"kubernetes.io/projected/dad2f49d-a450-46ed-9d77-15cc21b04853-kube-api-access-66vrv\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.725991 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.726108 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.733772 4948 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dad2f49d-a450-46ed-9d77-15cc21b04853-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.749497 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-config-data" (OuterVolumeSpecName: "config-data") pod "dad2f49d-a450-46ed-9d77-15cc21b04853" (UID: "dad2f49d-a450-46ed-9d77-15cc21b04853"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.782279 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.848276 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.848315 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dad2f49d-a450-46ed-9d77-15cc21b04853-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.855570 4948 scope.go:117] "RemoveContainer" containerID="8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.891274 4948 scope.go:117] "RemoveContainer" containerID="37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c" Jan 20 20:06:39 crc kubenswrapper[4948]: E0120 20:06:39.906027 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c\": container with ID starting with 37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c not found: ID does not exist" containerID="37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.906077 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c"} err="failed to get container status \"37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c\": rpc error: code = NotFound desc = could not find container \"37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c\": container with ID starting with 37ce72fde40ce4d72c575ed552ace1fa6f49d1e215aef481e045e5624527221c not found: ID does not exist" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.906109 4948 scope.go:117] "RemoveContainer" containerID="8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399" Jan 20 20:06:39 crc kubenswrapper[4948]: E0120 20:06:39.906536 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399\": container with ID starting with 8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399 not found: ID does not exist" containerID="8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399" Jan 20 20:06:39 crc kubenswrapper[4948]: I0120 20:06:39.906553 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399"} err="failed to get container status \"8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399\": rpc error: code = NotFound desc = could not find container \"8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399\": container with ID starting with 8ee541a831a37700b5a393e22626528246d10e3b6c5034c0e77f181275003399 not found: ID does not exist" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.100157 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.135258 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.173327 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:40 crc kubenswrapper[4948]: E0120 20:06:40.173803 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerName="glance-log" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.173825 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerName="glance-log" Jan 20 20:06:40 crc kubenswrapper[4948]: E0120 20:06:40.173840 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerName="glance-httpd" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.173846 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerName="glance-httpd" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.174144 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerName="glance-log" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.174182 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" containerName="glance-httpd" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.179481 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.183422 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.186921 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.192726 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.310572 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-99f6n" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.386053 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.386120 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-config-data\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.386202 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-scripts\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.386252 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.386271 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.386367 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.386396 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-logs\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.386413 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hb6d\" (UniqueName: \"kubernetes.io/projected/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-kube-api-access-6hb6d\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.488352 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-scripts\") pod 
\"0fa00dfc-b064-4964-a65d-80809492c96d\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.488473 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqdrh\" (UniqueName: \"kubernetes.io/projected/0fa00dfc-b064-4964-a65d-80809492c96d-kube-api-access-gqdrh\") pod \"0fa00dfc-b064-4964-a65d-80809492c96d\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.488508 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-combined-ca-bundle\") pod \"0fa00dfc-b064-4964-a65d-80809492c96d\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.488560 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-config-data\") pod \"0fa00dfc-b064-4964-a65d-80809492c96d\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489264 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fa00dfc-b064-4964-a65d-80809492c96d-logs\") pod \"0fa00dfc-b064-4964-a65d-80809492c96d\" (UID: \"0fa00dfc-b064-4964-a65d-80809492c96d\") " Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489578 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489628 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-config-data\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489734 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-scripts\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489807 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489842 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489897 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489939 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-logs\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.489969 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hb6d\" (UniqueName: \"kubernetes.io/projected/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-kube-api-access-6hb6d\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.496055 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-scripts" (OuterVolumeSpecName: "scripts") pod "0fa00dfc-b064-4964-a65d-80809492c96d" (UID: "0fa00dfc-b064-4964-a65d-80809492c96d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.499035 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0fa00dfc-b064-4964-a65d-80809492c96d-logs" (OuterVolumeSpecName: "logs") pod "0fa00dfc-b064-4964-a65d-80809492c96d" (UID: "0fa00dfc-b064-4964-a65d-80809492c96d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.499192 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.506412 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fa00dfc-b064-4964-a65d-80809492c96d-kube-api-access-gqdrh" (OuterVolumeSpecName: "kube-api-access-gqdrh") pod "0fa00dfc-b064-4964-a65d-80809492c96d" (UID: "0fa00dfc-b064-4964-a65d-80809492c96d"). InnerVolumeSpecName "kube-api-access-gqdrh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.508509 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.508819 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-logs\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.510600 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-scripts\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.525393 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0fa00dfc-b064-4964-a65d-80809492c96d" (UID: "0fa00dfc-b064-4964-a65d-80809492c96d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.525874 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-config-data\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.531633 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.540183 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.543072 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-config-data" (OuterVolumeSpecName: "config-data") pod "0fa00dfc-b064-4964-a65d-80809492c96d" (UID: "0fa00dfc-b064-4964-a65d-80809492c96d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.543880 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hb6d\" (UniqueName: \"kubernetes.io/projected/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-kube-api-access-6hb6d\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.563558 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.586050 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dad2f49d-a450-46ed-9d77-15cc21b04853" path="/var/lib/kubelet/pods/dad2f49d-a450-46ed-9d77-15cc21b04853/volumes" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.593165 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.593197 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqdrh\" (UniqueName: \"kubernetes.io/projected/0fa00dfc-b064-4964-a65d-80809492c96d-kube-api-access-gqdrh\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.593234 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.593250 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fa00dfc-b064-4964-a65d-80809492c96d-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.593264 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fa00dfc-b064-4964-a65d-80809492c96d-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.697103 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-99f6n" event={"ID":"0fa00dfc-b064-4964-a65d-80809492c96d","Type":"ContainerDied","Data":"7df162546ce92f3033cd568fa11bf79468713e7d542cb0f2f2a72b825b7812b7"} Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.697151 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7df162546ce92f3033cd568fa11bf79468713e7d542cb0f2f2a72b825b7812b7" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.697236 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-99f6n" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.715304 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"249e6833-425e-4243-b1ca-6c1b78a752de","Type":"ContainerStarted","Data":"634c2dafb4145d1d96a9a997c1c934c0ea1e2c777db8aa62bfdd7bea6edb028a"} Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.897285 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.899944 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6965b8b8b4-5f4wt"] Jan 20 20:06:40 crc kubenswrapper[4948]: E0120 20:06:40.900340 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fa00dfc-b064-4964-a65d-80809492c96d" containerName="placement-db-sync" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.900356 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fa00dfc-b064-4964-a65d-80809492c96d" containerName="placement-db-sync" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.900593 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fa00dfc-b064-4964-a65d-80809492c96d" containerName="placement-db-sync" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.901615 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.927879 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-nvrsd" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.928123 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.928818 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.928884 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.928824 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 20 20:06:40 crc kubenswrapper[4948]: I0120 20:06:40.940310 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6965b8b8b4-5f4wt"] Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.001423 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtbjh\" (UniqueName: \"kubernetes.io/projected/923c67b1-e9b6-4c67-86aa-96dc2760ba19-kube-api-access-dtbjh\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.001714 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-scripts\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.001851 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-internal-tls-certs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.002024 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/923c67b1-e9b6-4c67-86aa-96dc2760ba19-logs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: 
\"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.002069 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-public-tls-certs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.002132 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-config-data\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.002171 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-combined-ca-bundle\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.113616 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-internal-tls-certs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.113694 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/923c67b1-e9b6-4c67-86aa-96dc2760ba19-logs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.113748 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-public-tls-certs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.113795 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-config-data\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.113829 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-combined-ca-bundle\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.113925 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtbjh\" (UniqueName: \"kubernetes.io/projected/923c67b1-e9b6-4c67-86aa-96dc2760ba19-kube-api-access-dtbjh\") pod \"placement-6965b8b8b4-5f4wt\" (UID: 
\"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.113954 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-scripts\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.133935 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/923c67b1-e9b6-4c67-86aa-96dc2760ba19-logs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.137487 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-scripts\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.150436 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-combined-ca-bundle\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.161436 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-config-data\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.162295 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-internal-tls-certs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.162792 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/923c67b1-e9b6-4c67-86aa-96dc2760ba19-public-tls-certs\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.172835 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtbjh\" (UniqueName: \"kubernetes.io/projected/923c67b1-e9b6-4c67-86aa-96dc2760ba19-kube-api-access-dtbjh\") pod \"placement-6965b8b8b4-5f4wt\" (UID: \"923c67b1-e9b6-4c67-86aa-96dc2760ba19\") " pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.253228 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:41 crc kubenswrapper[4948]: I0120 20:06:41.795368 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.795334713 podStartE2EDuration="4.795334713s" podCreationTimestamp="2026-01-20 20:06:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:41.787491301 +0000 UTC m=+1029.738216270" watchObservedRunningTime="2026-01-20 20:06:41.795334713 +0000 UTC m=+1029.746059682" Jan 20 20:06:42 crc kubenswrapper[4948]: I0120 20:06:42.151467 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:06:42 crc kubenswrapper[4948]: I0120 20:06:42.343798 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6965b8b8b4-5f4wt"] Jan 20 20:06:42 crc kubenswrapper[4948]: I0120 20:06:42.789683 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2b8bd9a7-9ee4-4597-ac4e-83691d688db5","Type":"ContainerStarted","Data":"dd2e1c482e1f85060d65d814dc7299e219496bd239b4749a7b94b2a365bc3aeb"} Jan 20 20:06:42 crc kubenswrapper[4948]: I0120 20:06:42.806078 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"249e6833-425e-4243-b1ca-6c1b78a752de","Type":"ContainerStarted","Data":"d478d71e2be882fad485d78cde03700f868017416f23b39fe9e63427faa63cde"} Jan 20 20:06:42 crc kubenswrapper[4948]: I0120 20:06:42.818876 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6965b8b8b4-5f4wt" event={"ID":"923c67b1-e9b6-4c67-86aa-96dc2760ba19","Type":"ContainerStarted","Data":"3aed93aafee614ade07af3d6b8d9be4183e37a72392ee64790b6bbd7e913fe09"} Jan 20 20:06:43 crc kubenswrapper[4948]: I0120 20:06:43.842149 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6965b8b8b4-5f4wt" event={"ID":"923c67b1-e9b6-4c67-86aa-96dc2760ba19","Type":"ContainerStarted","Data":"b38adbd26e7feb522985be2a74c651dc435926c17874945435193879989376f2"} Jan 20 20:06:43 crc kubenswrapper[4948]: I0120 20:06:43.843825 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6965b8b8b4-5f4wt" event={"ID":"923c67b1-e9b6-4c67-86aa-96dc2760ba19","Type":"ContainerStarted","Data":"1be756299eed851b737e5f654b27a9f148025c723a2d7500660b886af08b3205"} Jan 20 20:06:43 crc kubenswrapper[4948]: I0120 20:06:43.844132 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:43 crc kubenswrapper[4948]: I0120 20:06:43.850569 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2b8bd9a7-9ee4-4597-ac4e-83691d688db5","Type":"ContainerStarted","Data":"d489e8dd56e6b521defd6b93328af99da8729aaeae03d32ebde333ba8c9321de"} Jan 20 20:06:43 crc kubenswrapper[4948]: I0120 20:06:43.871887 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6965b8b8b4-5f4wt" podStartSLOduration=3.871866282 podStartE2EDuration="3.871866282s" podCreationTimestamp="2026-01-20 20:06:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:43.860922803 +0000 UTC m=+1031.811647772" watchObservedRunningTime="2026-01-20 
20:06:43.871866282 +0000 UTC m=+1031.822591251" Jan 20 20:06:44 crc kubenswrapper[4948]: I0120 20:06:44.864399 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2b8bd9a7-9ee4-4597-ac4e-83691d688db5","Type":"ContainerStarted","Data":"fec5eb47d6b163bbd97d2f2d7a7df78179f0617b26e8b1e9c9d3feace7af8042"} Jan 20 20:06:44 crc kubenswrapper[4948]: I0120 20:06:44.864811 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:06:44 crc kubenswrapper[4948]: I0120 20:06:44.897283 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.897221957 podStartE2EDuration="4.897221957s" podCreationTimestamp="2026-01-20 20:06:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:44.886684159 +0000 UTC m=+1032.837409138" watchObservedRunningTime="2026-01-20 20:06:44.897221957 +0000 UTC m=+1032.847946926" Jan 20 20:06:45 crc kubenswrapper[4948]: I0120 20:06:45.257890 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:06:45 crc kubenswrapper[4948]: I0120 20:06:45.344355 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-5rhgw"] Jan 20 20:06:45 crc kubenswrapper[4948]: I0120 20:06:45.350783 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" podUID="2c19042c-af73-4228-a686-15cb4f7365cf" containerName="dnsmasq-dns" containerID="cri-o://ccc10d498e141427d768779e9420b8e9c911a45978e27249a8c3f3c1284e675b" gracePeriod=10 Jan 20 20:06:45 crc kubenswrapper[4948]: I0120 20:06:45.884351 4948 generic.go:334] "Generic (PLEG): container finished" podID="c230d755-993f-4cc4-b387-992589975cc7" containerID="5c8cff267eece054abb0bed6f832e21378d67433d0359d0efa0a1e57c0898ede" exitCode=0 Jan 20 20:06:45 crc kubenswrapper[4948]: I0120 20:06:45.884452 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hx7kj" event={"ID":"c230d755-993f-4cc4-b387-992589975cc7","Type":"ContainerDied","Data":"5c8cff267eece054abb0bed6f832e21378d67433d0359d0efa0a1e57c0898ede"} Jan 20 20:06:45 crc kubenswrapper[4948]: I0120 20:06:45.888463 4948 generic.go:334] "Generic (PLEG): container finished" podID="2c19042c-af73-4228-a686-15cb4f7365cf" containerID="ccc10d498e141427d768779e9420b8e9c911a45978e27249a8c3f3c1284e675b" exitCode=0 Jan 20 20:06:45 crc kubenswrapper[4948]: I0120 20:06:45.889931 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" event={"ID":"2c19042c-af73-4228-a686-15cb4f7365cf","Type":"ContainerDied","Data":"ccc10d498e141427d768779e9420b8e9c911a45978e27249a8c3f3c1284e675b"} Jan 20 20:06:48 crc kubenswrapper[4948]: I0120 20:06:48.125727 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:48 crc kubenswrapper[4948]: I0120 20:06:48.126645 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:48 crc kubenswrapper[4948]: I0120 20:06:48.179311 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:48 crc kubenswrapper[4948]: I0120 
20:06:48.183357 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:48 crc kubenswrapper[4948]: I0120 20:06:48.928770 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:48 crc kubenswrapper[4948]: I0120 20:06:48.928971 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:49 crc kubenswrapper[4948]: I0120 20:06:49.395408 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 20 20:06:49 crc kubenswrapper[4948]: I0120 20:06:49.542645 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Jan 20 20:06:49 crc kubenswrapper[4948]: I0120 20:06:49.847596 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" podUID="2c19042c-af73-4228-a686-15cb4f7365cf" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: connect: connection refused" Jan 20 20:06:50 crc kubenswrapper[4948]: I0120 20:06:50.898350 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 20 20:06:50 crc kubenswrapper[4948]: I0120 20:06:50.900661 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 20 20:06:50 crc kubenswrapper[4948]: I0120 20:06:50.955449 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hx7kj" event={"ID":"c230d755-993f-4cc4-b387-992589975cc7","Type":"ContainerDied","Data":"249ccbc6ee7c339d5d8bb4c43c4a6cff0720ca898fb38f3ffbbdcb7423977c33"} Jan 20 20:06:50 crc kubenswrapper[4948]: I0120 20:06:50.955505 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="249ccbc6ee7c339d5d8bb4c43c4a6cff0720ca898fb38f3ffbbdcb7423977c33" Jan 20 20:06:50 crc kubenswrapper[4948]: I0120 20:06:50.959677 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.019345 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.019466 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.093220 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-fernet-keys\") pod \"c230d755-993f-4cc4-b387-992589975cc7\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.093626 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-combined-ca-bundle\") pod \"c230d755-993f-4cc4-b387-992589975cc7\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.093689 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-credential-keys\") pod \"c230d755-993f-4cc4-b387-992589975cc7\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.093771 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-config-data\") pod \"c230d755-993f-4cc4-b387-992589975cc7\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.093871 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sqgf\" (UniqueName: \"kubernetes.io/projected/c230d755-993f-4cc4-b387-992589975cc7-kube-api-access-5sqgf\") pod \"c230d755-993f-4cc4-b387-992589975cc7\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.093922 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-scripts\") pod \"c230d755-993f-4cc4-b387-992589975cc7\" (UID: \"c230d755-993f-4cc4-b387-992589975cc7\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.103634 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c230d755-993f-4cc4-b387-992589975cc7-kube-api-access-5sqgf" (OuterVolumeSpecName: "kube-api-access-5sqgf") pod "c230d755-993f-4cc4-b387-992589975cc7" (UID: "c230d755-993f-4cc4-b387-992589975cc7"). InnerVolumeSpecName "kube-api-access-5sqgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.104312 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-scripts" (OuterVolumeSpecName: "scripts") pod "c230d755-993f-4cc4-b387-992589975cc7" (UID: "c230d755-993f-4cc4-b387-992589975cc7"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.104377 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c230d755-993f-4cc4-b387-992589975cc7" (UID: "c230d755-993f-4cc4-b387-992589975cc7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.111853 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c230d755-993f-4cc4-b387-992589975cc7" (UID: "c230d755-993f-4cc4-b387-992589975cc7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.166118 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c230d755-993f-4cc4-b387-992589975cc7" (UID: "c230d755-993f-4cc4-b387-992589975cc7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.192336 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-config-data" (OuterVolumeSpecName: "config-data") pod "c230d755-993f-4cc4-b387-992589975cc7" (UID: "c230d755-993f-4cc4-b387-992589975cc7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.197891 4948 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.197924 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.197935 4948 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.197943 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.197953 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sqgf\" (UniqueName: \"kubernetes.io/projected/c230d755-993f-4cc4-b387-992589975cc7-kube-api-access-5sqgf\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.197961 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c230d755-993f-4cc4-b387-992589975cc7-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.414190 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.505273 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-config\") pod \"2c19042c-af73-4228-a686-15cb4f7365cf\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.505395 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzmmv\" (UniqueName: \"kubernetes.io/projected/2c19042c-af73-4228-a686-15cb4f7365cf-kube-api-access-tzmmv\") pod \"2c19042c-af73-4228-a686-15cb4f7365cf\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.505448 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-swift-storage-0\") pod \"2c19042c-af73-4228-a686-15cb4f7365cf\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.505499 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-svc\") pod \"2c19042c-af73-4228-a686-15cb4f7365cf\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.505534 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-sb\") pod \"2c19042c-af73-4228-a686-15cb4f7365cf\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.505555 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-nb\") pod \"2c19042c-af73-4228-a686-15cb4f7365cf\" (UID: \"2c19042c-af73-4228-a686-15cb4f7365cf\") " Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.538008 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c19042c-af73-4228-a686-15cb4f7365cf-kube-api-access-tzmmv" (OuterVolumeSpecName: "kube-api-access-tzmmv") pod "2c19042c-af73-4228-a686-15cb4f7365cf" (UID: "2c19042c-af73-4228-a686-15cb4f7365cf"). InnerVolumeSpecName "kube-api-access-tzmmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.596000 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-config" (OuterVolumeSpecName: "config") pod "2c19042c-af73-4228-a686-15cb4f7365cf" (UID: "2c19042c-af73-4228-a686-15cb4f7365cf"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.617251 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.617297 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzmmv\" (UniqueName: \"kubernetes.io/projected/2c19042c-af73-4228-a686-15cb4f7365cf-kube-api-access-tzmmv\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.629342 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2c19042c-af73-4228-a686-15cb4f7365cf" (UID: "2c19042c-af73-4228-a686-15cb4f7365cf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.630967 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2c19042c-af73-4228-a686-15cb4f7365cf" (UID: "2c19042c-af73-4228-a686-15cb4f7365cf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.649570 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2c19042c-af73-4228-a686-15cb4f7365cf" (UID: "2c19042c-af73-4228-a686-15cb4f7365cf"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.709286 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2c19042c-af73-4228-a686-15cb4f7365cf" (UID: "2c19042c-af73-4228-a686-15cb4f7365cf"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.725595 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.725654 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.725667 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.725676 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c19042c-af73-4228-a686-15cb4f7365cf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.981001 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" event={"ID":"2c19042c-af73-4228-a686-15cb4f7365cf","Type":"ContainerDied","Data":"9b6362b96f7426c0085c1916bf04e1f096a2afaf184ba4da1130b4d42379ad86"} Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.981080 4948 scope.go:117] "RemoveContainer" containerID="ccc10d498e141427d768779e9420b8e9c911a45978e27249a8c3f3c1284e675b" Jan 20 20:06:51 crc kubenswrapper[4948]: I0120 20:06:51.981247 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-5rhgw" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.002811 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6cf14434-5ac6-4983-8abe-7305b182c92d","Type":"ContainerStarted","Data":"93552411f8e71701c6a5028894e3abda60c72e94fa54df5b8c4c0b2522393b4d"} Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.007691 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-qxsld" event={"ID":"4a24a241-d8d2-484c-ae7b-436777e1fddd","Type":"ContainerStarted","Data":"7191cc08b8bfa67d24196060b510b4a9e5eb414c25e910fdb77070f33aa9660b"} Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.007784 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.007996 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hx7kj" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.008389 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.063212 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-5rhgw"] Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.064022 4948 scope.go:117] "RemoveContainer" containerID="55f65a7dd9dac3467057d0e1c626cd0593cbf1797d4f0fc4a00f34c0668130c7" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.072821 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-5rhgw"] Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.097527 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-qxsld" podStartSLOduration=11.749341786 podStartE2EDuration="1m4.097495415s" podCreationTimestamp="2026-01-20 20:05:48 +0000 UTC" firstStartedPulling="2026-01-20 20:05:51.519806713 +0000 UTC m=+979.470531682" lastFinishedPulling="2026-01-20 20:06:43.867960352 +0000 UTC m=+1031.818685311" observedRunningTime="2026-01-20 20:06:52.055177668 +0000 UTC m=+1040.005902637" watchObservedRunningTime="2026-01-20 20:06:52.097495415 +0000 UTC m=+1040.048220384" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.262438 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7c45b45594-rdsj9"] Jan 20 20:06:52 crc kubenswrapper[4948]: E0120 20:06:52.262887 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c19042c-af73-4228-a686-15cb4f7365cf" containerName="dnsmasq-dns" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.262911 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c19042c-af73-4228-a686-15cb4f7365cf" containerName="dnsmasq-dns" Jan 20 20:06:52 crc kubenswrapper[4948]: E0120 20:06:52.262926 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c230d755-993f-4cc4-b387-992589975cc7" containerName="keystone-bootstrap" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.262933 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c230d755-993f-4cc4-b387-992589975cc7" containerName="keystone-bootstrap" Jan 20 20:06:52 crc kubenswrapper[4948]: E0120 20:06:52.262944 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c19042c-af73-4228-a686-15cb4f7365cf" containerName="init" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.262951 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c19042c-af73-4228-a686-15cb4f7365cf" containerName="init" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.263112 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c230d755-993f-4cc4-b387-992589975cc7" containerName="keystone-bootstrap" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.263132 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c19042c-af73-4228-a686-15cb4f7365cf" containerName="dnsmasq-dns" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.263880 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.266886 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.267149 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.267340 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.267522 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.269635 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-9zfkq" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.275671 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.291854 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7c45b45594-rdsj9"] Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.437966 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-public-tls-certs\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.438012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-internal-tls-certs\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.438032 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-credential-keys\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.438093 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-config-data\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.438133 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhqjf\" (UniqueName: \"kubernetes.io/projected/413e45d6-d022-4586-82cc-228d8431dce4-kube-api-access-xhqjf\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.438155 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-combined-ca-bundle\") pod 
\"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.438172 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-scripts\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.438198 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-fernet-keys\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.539792 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-combined-ca-bundle\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.539834 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-scripts\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.539871 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-fernet-keys\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.539929 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-public-tls-certs\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.539949 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-internal-tls-certs\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.539968 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-credential-keys\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.541079 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-config-data\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " 
pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.543555 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhqjf\" (UniqueName: \"kubernetes.io/projected/413e45d6-d022-4586-82cc-228d8431dce4-kube-api-access-xhqjf\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.549781 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-fernet-keys\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.553146 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-public-tls-certs\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.553678 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-combined-ca-bundle\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.555346 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-credential-keys\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.560903 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-internal-tls-certs\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.561806 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-scripts\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.563751 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/413e45d6-d022-4586-82cc-228d8431dce4-config-data\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.563792 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhqjf\" (UniqueName: \"kubernetes.io/projected/413e45d6-d022-4586-82cc-228d8431dce4-kube-api-access-xhqjf\") pod \"keystone-7c45b45594-rdsj9\" (UID: \"413e45d6-d022-4586-82cc-228d8431dce4\") " pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.580382 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:52 crc kubenswrapper[4948]: I0120 20:06:52.600612 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c19042c-af73-4228-a686-15cb4f7365cf" path="/var/lib/kubelet/pods/2c19042c-af73-4228-a686-15cb4f7365cf/volumes" Jan 20 20:06:53 crc kubenswrapper[4948]: I0120 20:06:53.059669 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dchk5" event={"ID":"974e456e-61d1-4c5e-a8c9-9ebbb5246848","Type":"ContainerStarted","Data":"3166fa1c233ed00203e5ec4931b40a183731cb06c32aaa5cb427529ecebc197d"} Jan 20 20:06:53 crc kubenswrapper[4948]: I0120 20:06:53.376081 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-dchk5" podStartSLOduration=5.300937009 podStartE2EDuration="1m5.376060419s" podCreationTimestamp="2026-01-20 20:05:48 +0000 UTC" firstStartedPulling="2026-01-20 20:05:50.913853488 +0000 UTC m=+978.864578457" lastFinishedPulling="2026-01-20 20:06:50.988976898 +0000 UTC m=+1038.939701867" observedRunningTime="2026-01-20 20:06:53.088386614 +0000 UTC m=+1041.039111583" watchObservedRunningTime="2026-01-20 20:06:53.376060419 +0000 UTC m=+1041.326785388" Jan 20 20:06:53 crc kubenswrapper[4948]: I0120 20:06:53.378405 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7c45b45594-rdsj9"] Jan 20 20:06:54 crc kubenswrapper[4948]: I0120 20:06:54.117598 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:06:54 crc kubenswrapper[4948]: I0120 20:06:54.118278 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:06:54 crc kubenswrapper[4948]: I0120 20:06:54.118974 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7c45b45594-rdsj9" event={"ID":"413e45d6-d022-4586-82cc-228d8431dce4","Type":"ContainerStarted","Data":"c79dc746bdb02c5f13b2dc4c56541c0e53141e59216689930330abf9e4b56ce4"} Jan 20 20:06:54 crc kubenswrapper[4948]: I0120 20:06:54.119012 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7c45b45594-rdsj9" event={"ID":"413e45d6-d022-4586-82cc-228d8431dce4","Type":"ContainerStarted","Data":"cd44564bc138509d5f4b503b5872c95a9b99b89ec80cce016162a8cfd9c392f1"} Jan 20 20:06:54 crc kubenswrapper[4948]: I0120 20:06:54.119063 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:06:54 crc kubenswrapper[4948]: I0120 20:06:54.185646 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7c45b45594-rdsj9" podStartSLOduration=2.185625622 podStartE2EDuration="2.185625622s" podCreationTimestamp="2026-01-20 20:06:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:06:54.14948267 +0000 UTC m=+1042.100207639" watchObservedRunningTime="2026-01-20 20:06:54.185625622 +0000 UTC m=+1042.136350591" Jan 20 20:06:57 crc kubenswrapper[4948]: I0120 20:06:57.329618 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 20 20:06:57 crc kubenswrapper[4948]: I0120 20:06:57.330398 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:06:57 crc kubenswrapper[4948]: I0120 20:06:57.377431 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/glance-default-internal-api-0" Jan 20 20:06:58 crc kubenswrapper[4948]: I0120 20:06:58.142379 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 20 20:06:58 crc kubenswrapper[4948]: I0120 20:06:58.142763 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:06:58 crc kubenswrapper[4948]: I0120 20:06:58.628327 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 20 20:06:59 crc kubenswrapper[4948]: I0120 20:06:59.388154 4948 generic.go:334] "Generic (PLEG): container finished" podID="4a24a241-d8d2-484c-ae7b-436777e1fddd" containerID="7191cc08b8bfa67d24196060b510b4a9e5eb414c25e910fdb77070f33aa9660b" exitCode=0 Jan 20 20:06:59 crc kubenswrapper[4948]: I0120 20:06:59.388478 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-qxsld" event={"ID":"4a24a241-d8d2-484c-ae7b-436777e1fddd","Type":"ContainerDied","Data":"7191cc08b8bfa67d24196060b510b4a9e5eb414c25e910fdb77070f33aa9660b"} Jan 20 20:06:59 crc kubenswrapper[4948]: I0120 20:06:59.393692 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 20 20:06:59 crc kubenswrapper[4948]: I0120 20:06:59.540232 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Jan 20 20:07:00 crc kubenswrapper[4948]: I0120 20:07:00.299531 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5656668848-wwxxb" Jan 20 20:07:02 crc kubenswrapper[4948]: I0120 20:07:02.677081 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-79d47bbd4f-rpj54" Jan 20 20:07:02 crc kubenswrapper[4948]: I0120 20:07:02.780170 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5656668848-wwxxb"] Jan 20 20:07:02 crc kubenswrapper[4948]: I0120 20:07:02.780381 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5656668848-wwxxb" podUID="168fa071-a608-4772-8013-f0fee67843a4" containerName="neutron-api" containerID="cri-o://c55ffc95d603f995af1d5ccf5e770b53298103459d5435f8224252f2a6bec3ae" gracePeriod=30 Jan 20 20:07:02 crc kubenswrapper[4948]: I0120 20:07:02.780929 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5656668848-wwxxb" podUID="168fa071-a608-4772-8013-f0fee67843a4" containerName="neutron-httpd" containerID="cri-o://7124509677e848ae63f0a0e9b27eb09c2c49e5b152c91392048787b8ee7f6820" gracePeriod=30 Jan 20 20:07:03 crc kubenswrapper[4948]: I0120 20:07:03.454145 4948 generic.go:334] "Generic (PLEG): container finished" podID="168fa071-a608-4772-8013-f0fee67843a4" containerID="7124509677e848ae63f0a0e9b27eb09c2c49e5b152c91392048787b8ee7f6820" exitCode=0 Jan 20 20:07:03 crc kubenswrapper[4948]: I0120 20:07:03.454220 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5656668848-wwxxb" 
event={"ID":"168fa071-a608-4772-8013-f0fee67843a4","Type":"ContainerDied","Data":"7124509677e848ae63f0a0e9b27eb09c2c49e5b152c91392048787b8ee7f6820"} Jan 20 20:07:04 crc kubenswrapper[4948]: I0120 20:07:04.468687 4948 generic.go:334] "Generic (PLEG): container finished" podID="974e456e-61d1-4c5e-a8c9-9ebbb5246848" containerID="3166fa1c233ed00203e5ec4931b40a183731cb06c32aaa5cb427529ecebc197d" exitCode=0 Jan 20 20:07:04 crc kubenswrapper[4948]: I0120 20:07:04.468840 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dchk5" event={"ID":"974e456e-61d1-4c5e-a8c9-9ebbb5246848","Type":"ContainerDied","Data":"3166fa1c233ed00203e5ec4931b40a183731cb06c32aaa5cb427529ecebc197d"} Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.044353 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-qxsld" Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.163952 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-db-sync-config-data\") pod \"4a24a241-d8d2-484c-ae7b-436777e1fddd\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.164062 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wn6js\" (UniqueName: \"kubernetes.io/projected/4a24a241-d8d2-484c-ae7b-436777e1fddd-kube-api-access-wn6js\") pod \"4a24a241-d8d2-484c-ae7b-436777e1fddd\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.164121 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-combined-ca-bundle\") pod \"4a24a241-d8d2-484c-ae7b-436777e1fddd\" (UID: \"4a24a241-d8d2-484c-ae7b-436777e1fddd\") " Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.171248 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4a24a241-d8d2-484c-ae7b-436777e1fddd" (UID: "4a24a241-d8d2-484c-ae7b-436777e1fddd"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.172955 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a24a241-d8d2-484c-ae7b-436777e1fddd-kube-api-access-wn6js" (OuterVolumeSpecName: "kube-api-access-wn6js") pod "4a24a241-d8d2-484c-ae7b-436777e1fddd" (UID: "4a24a241-d8d2-484c-ae7b-436777e1fddd"). InnerVolumeSpecName "kube-api-access-wn6js". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.194287 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a24a241-d8d2-484c-ae7b-436777e1fddd" (UID: "4a24a241-d8d2-484c-ae7b-436777e1fddd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.273950 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wn6js\" (UniqueName: \"kubernetes.io/projected/4a24a241-d8d2-484c-ae7b-436777e1fddd-kube-api-access-wn6js\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.274282 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.274293 4948 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4a24a241-d8d2-484c-ae7b-436777e1fddd-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.481807 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-qxsld" Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.481864 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-qxsld" event={"ID":"4a24a241-d8d2-484c-ae7b-436777e1fddd","Type":"ContainerDied","Data":"80d6986ba2e1b9f9ea4a6f053d43c6bb0c9f7d90bf6f5fee7792198e05231092"} Jan 20 20:07:05 crc kubenswrapper[4948]: I0120 20:07:05.481902 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80d6986ba2e1b9f9ea4a6f053d43c6bb0c9f7d90bf6f5fee7792198e05231092" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.334897 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6d76c4759-rj9ns"] Jan 20 20:07:06 crc kubenswrapper[4948]: E0120 20:07:06.335826 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a24a241-d8d2-484c-ae7b-436777e1fddd" containerName="barbican-db-sync" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.335850 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a24a241-d8d2-484c-ae7b-436777e1fddd" containerName="barbican-db-sync" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.336196 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a24a241-d8d2-484c-ae7b-436777e1fddd" containerName="barbican-db-sync" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.347272 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.352987 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.353186 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.354659 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-mrjrl" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.397492 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-88477f558-k4bcx"] Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.402400 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.405768 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.432717 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6d76c4759-rj9ns"] Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.498938 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-88477f558-k4bcx"] Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517325 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-config-data-custom\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517521 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-config-data-custom\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517600 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-config-data\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517640 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-combined-ca-bundle\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517664 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb8m6\" (UniqueName: \"kubernetes.io/projected/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-kube-api-access-nb8m6\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517758 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-config-data\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517808 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-combined-ca-bundle\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: 
\"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517840 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xblx7\" (UniqueName: \"kubernetes.io/projected/9b73cf57-92bd-47c5-8f21-ffcc9438594b-kube-api-access-xblx7\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.517879 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b73cf57-92bd-47c5-8f21-ffcc9438594b-logs\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.519444 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-logs\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.624886 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-config-data-custom\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.624973 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-config-data\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.625005 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-combined-ca-bundle\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.625030 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb8m6\" (UniqueName: \"kubernetes.io/projected/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-kube-api-access-nb8m6\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.625079 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-config-data\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.625113 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-combined-ca-bundle\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.625136 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xblx7\" (UniqueName: \"kubernetes.io/projected/9b73cf57-92bd-47c5-8f21-ffcc9438594b-kube-api-access-xblx7\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.625167 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b73cf57-92bd-47c5-8f21-ffcc9438594b-logs\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.625243 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-logs\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.625280 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-config-data-custom\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.627359 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9b73cf57-92bd-47c5-8f21-ffcc9438594b-logs\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.629301 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-logs\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.640484 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-combined-ca-bundle\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.646022 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-config-data\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.648377 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-combined-ca-bundle\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.651862 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-config-data-custom\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.657644 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-2nmnv"] Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.658174 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b73cf57-92bd-47c5-8f21-ffcc9438594b-config-data\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.659163 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.670449 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-config-data-custom\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.673315 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xblx7\" (UniqueName: \"kubernetes.io/projected/9b73cf57-92bd-47c5-8f21-ffcc9438594b-kube-api-access-xblx7\") pod \"barbican-worker-6d76c4759-rj9ns\" (UID: \"9b73cf57-92bd-47c5-8f21-ffcc9438594b\") " pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.682068 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb8m6\" (UniqueName: \"kubernetes.io/projected/e71b28b0-54d9-48ce-9442-412fbdd5fe0f-kube-api-access-nb8m6\") pod \"barbican-keystone-listener-88477f558-k4bcx\" (UID: \"e71b28b0-54d9-48ce-9442-412fbdd5fe0f\") " pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.682590 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6d76c4759-rj9ns" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.711006 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-2nmnv"] Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.795280 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-88477f558-k4bcx" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.830444 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.830730 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzs92\" (UniqueName: \"kubernetes.io/projected/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-kube-api-access-gzs92\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.830913 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.835893 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-config\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.836033 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.836127 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-svc\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.840207 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-76b984f6db-smbhz"] Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.842284 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.851202 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.870599 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-76b984f6db-smbhz"] Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.938380 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.938781 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzs92\" (UniqueName: \"kubernetes.io/projected/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-kube-api-access-gzs92\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.938883 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.938985 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcrr8\" (UniqueName: \"kubernetes.io/projected/81ccff20-6613-42e9-a2fb-22a520b8b4cf-kube-api-access-zcrr8\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.939543 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/81ccff20-6613-42e9-a2fb-22a520b8b4cf-logs\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.939734 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-config\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.939927 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data-custom\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.940050 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " 
pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.940217 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.940336 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-svc\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.940510 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-combined-ca-bundle\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.940858 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.941470 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-config\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.941758 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.942277 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-svc\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.945441 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:06 crc kubenswrapper[4948]: I0120 20:07:06.967337 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzs92\" (UniqueName: \"kubernetes.io/projected/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-kube-api-access-gzs92\") pod \"dnsmasq-dns-85ff748b95-2nmnv\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:07 crc 
kubenswrapper[4948]: I0120 20:07:07.066929 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-combined-ca-bundle\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.067225 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcrr8\" (UniqueName: \"kubernetes.io/projected/81ccff20-6613-42e9-a2fb-22a520b8b4cf-kube-api-access-zcrr8\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.067289 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/81ccff20-6613-42e9-a2fb-22a520b8b4cf-logs\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.067407 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data-custom\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.067495 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.068139 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/81ccff20-6613-42e9-a2fb-22a520b8b4cf-logs\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.071152 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data-custom\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.074532 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.078958 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-combined-ca-bundle\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.099715 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-2nmnv"
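The entries above show the reconciler's two-phase mount for the barbican-api pod's volumes: an "operationExecutor.MountVolume started" line per volume, then a matching "MountVolume.SetUp succeeded" line from operation_generator.go. A minimal Go sketch, not part of the captured log, that pairs the two phases by volume name when fed a log like this one on stdin (the file name, regexp, and output format are ours, not the kubelet's):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"strings"
)

// volRe captures the escaped volume name, e.g.: for volume \"combined-ca-bundle\"
var volRe = regexp.MustCompile(`for volume \\"([^"\\]+)\\"`)

func main() {
	started := map[string]bool{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // entries like the ones above run long
	for sc.Scan() {
		line := sc.Text()
		m := volRe.FindStringSubmatch(line)
		if m == nil {
			continue
		}
		vol := m[1]
		switch {
		case strings.Contains(line, "MountVolume started"):
			started[vol] = true
		case strings.Contains(line, "MountVolume.SetUp succeeded") && started[vol]:
			fmt.Printf("%s: started -> set up\n", vol)
			delete(started, vol)
		}
	}
	for vol := range started {
		fmt.Printf("%s: start seen, no SetUp success in this window\n", vol)
	}
}

On the excerpt above this prints a "started -> set up" line for logs, config-data-custom, config-data, and combined-ca-bundle; kube-api-access-zcrr8 completes a few entries later.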
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.100890 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcrr8\" (UniqueName: \"kubernetes.io/projected/81ccff20-6613-42e9-a2fb-22a520b8b4cf-kube-api-access-zcrr8\") pod \"barbican-api-76b984f6db-smbhz\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.163874 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.472880 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dchk5"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.541207 4948 generic.go:334] "Generic (PLEG): container finished" podID="168fa071-a608-4772-8013-f0fee67843a4" containerID="c55ffc95d603f995af1d5ccf5e770b53298103459d5435f8224252f2a6bec3ae" exitCode=0
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.541534 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5656668848-wwxxb" event={"ID":"168fa071-a608-4772-8013-f0fee67843a4","Type":"ContainerDied","Data":"c55ffc95d603f995af1d5ccf5e770b53298103459d5435f8224252f2a6bec3ae"}
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.565977 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-dchk5" event={"ID":"974e456e-61d1-4c5e-a8c9-9ebbb5246848","Type":"ContainerDied","Data":"566e0d816ec12a3294bf5b34b925771c1b35726bf257c61e64de24434be4f13a"}
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.566022 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="566e0d816ec12a3294bf5b34b925771c1b35726bf257c61e64de24434be4f13a"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.566085 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-dchk5"
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.582474 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/974e456e-61d1-4c5e-a8c9-9ebbb5246848-etc-machine-id\") pod \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") "
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.582549 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-config-data\") pod \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") "
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.582586 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-db-sync-config-data\") pod \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") "
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.582605 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-scripts\") pod \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") "
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.582747 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gk68v\" (UniqueName: \"kubernetes.io/projected/974e456e-61d1-4c5e-a8c9-9ebbb5246848-kube-api-access-gk68v\") pod \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") "
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.582849 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-combined-ca-bundle\") pod \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\" (UID: \"974e456e-61d1-4c5e-a8c9-9ebbb5246848\") "
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.585007 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/974e456e-61d1-4c5e-a8c9-9ebbb5246848-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "974e456e-61d1-4c5e-a8c9-9ebbb5246848" (UID: "974e456e-61d1-4c5e-a8c9-9ebbb5246848"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.590145 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "974e456e-61d1-4c5e-a8c9-9ebbb5246848" (UID: "974e456e-61d1-4c5e-a8c9-9ebbb5246848"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.597997 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/974e456e-61d1-4c5e-a8c9-9ebbb5246848-kube-api-access-gk68v" (OuterVolumeSpecName: "kube-api-access-gk68v") pod "974e456e-61d1-4c5e-a8c9-9ebbb5246848" (UID: "974e456e-61d1-4c5e-a8c9-9ebbb5246848"). InnerVolumeSpecName "kube-api-access-gk68v". PluginName "kubernetes.io/projected", VolumeGidValue ""
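Teardown for the finished cinder-db-sync pod runs the mount flow in reverse, and each volume passes through three message shapes: "UnmountVolume started", then "UnmountVolume.TearDown succeeded", then "Volume detached". A sketch, not kubelet source, that folds those three shapes into a per-volume phase for one pod UID; it keys on the shared ".../<pod-uid>-<volume-name>" plugin path, and the helper and phase numbering are ours (needs Go 1.21+ for the built-in max):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

const (
	phaseUnmountStarted = iota + 1
	phaseTearDownOK
	phaseDetached
)

// volumeName pulls the volume's name out of the plugin path that all three
// message shapes above share: ".../<pod-uid>-<volume-name>".
func volumeName(line, podUID string) string {
	i := strings.Index(line, podUID+"-")
	if i < 0 {
		return ""
	}
	rest := line[i+len(podUID)+1:]
	if end := strings.IndexAny(rest, `"\)`); end >= 0 {
		return rest[:end]
	}
	return rest
}

func main() {
	const podUID = "974e456e-61d1-4c5e-a8c9-9ebbb5246848" // the cinder-db-sync pod above
	phase := map[string]int{}                             // volume name -> furthest phase seen
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20)
	for sc.Scan() {
		line := sc.Text()
		if !strings.Contains(line, podUID) {
			continue
		}
		name := volumeName(line, podUID)
		if name == "" {
			continue
		}
		switch {
		case strings.Contains(line, "UnmountVolume started"):
			phase[name] = max(phase[name], phaseUnmountStarted)
		case strings.Contains(line, "UnmountVolume.TearDown succeeded"):
			phase[name] = max(phase[name], phaseTearDownOK)
		case strings.Contains(line, "Volume detached"):
			phase[name] = max(phase[name], phaseDetached)
		}
	}
	for name, p := range phase {
		fmt.Printf("%-22s phase %d of 3\n", name, p)
	}
}

Run over this window, every volume of the pod should report phase 3 of 3, since the "Volume detached" lines all appear below before the pod's volumes directory is cleaned up.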
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.601234 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-scripts" (OuterVolumeSpecName: "scripts") pod "974e456e-61d1-4c5e-a8c9-9ebbb5246848" (UID: "974e456e-61d1-4c5e-a8c9-9ebbb5246848"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.640238 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "974e456e-61d1-4c5e-a8c9-9ebbb5246848" (UID: "974e456e-61d1-4c5e-a8c9-9ebbb5246848"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.685392 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.685423 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/974e456e-61d1-4c5e-a8c9-9ebbb5246848-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.685434 4948 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.685444 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.685466 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gk68v\" (UniqueName: \"kubernetes.io/projected/974e456e-61d1-4c5e-a8c9-9ebbb5246848-kube-api-access-gk68v\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.686256 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-config-data" (OuterVolumeSpecName: "config-data") pod "974e456e-61d1-4c5e-a8c9-9ebbb5246848" (UID: "974e456e-61d1-4c5e-a8c9-9ebbb5246848"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:07 crc kubenswrapper[4948]: I0120 20:07:07.787177 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/974e456e-61d1-4c5e-a8c9-9ebbb5246848-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:08 crc kubenswrapper[4948]: E0120 20:07:08.794156 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24:latest" Jan 20 20:07:08 crc kubenswrapper[4948]: E0120 20:07:08.794679 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24:latest,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q4qf6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(6cf14434-5ac6-4983-8abe-7305b182c92d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 20 20:07:08 crc kubenswrapper[4948]: E0120 20:07:08.796342 4948 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"proxy-httpd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.918155 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5656668848-wwxxb"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.953164 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 20:07:08 crc kubenswrapper[4948]: E0120 20:07:08.953943 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="168fa071-a608-4772-8013-f0fee67843a4" containerName="neutron-api"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.954109 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="168fa071-a608-4772-8013-f0fee67843a4" containerName="neutron-api"
Jan 20 20:07:08 crc kubenswrapper[4948]: E0120 20:07:08.954201 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="168fa071-a608-4772-8013-f0fee67843a4" containerName="neutron-httpd"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.954275 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="168fa071-a608-4772-8013-f0fee67843a4" containerName="neutron-httpd"
Jan 20 20:07:08 crc kubenswrapper[4948]: E0120 20:07:08.954353 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="974e456e-61d1-4c5e-a8c9-9ebbb5246848" containerName="cinder-db-sync"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.954416 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="974e456e-61d1-4c5e-a8c9-9ebbb5246848" containerName="cinder-db-sync"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.954682 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="168fa071-a608-4772-8013-f0fee67843a4" containerName="neutron-httpd"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.954969 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="974e456e-61d1-4c5e-a8c9-9ebbb5246848" containerName="cinder-db-sync"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.955082 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="168fa071-a608-4772-8013-f0fee67843a4" containerName="neutron-api"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.956158 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
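The serialized proxy-httpd spec dumped in the "Unhandled Error" entry above includes both probes: a liveness probe with InitialDelaySeconds:300, PeriodSeconds:30, FailureThreshold:3 and a readiness probe with InitialDelaySeconds:10 and the same period and threshold. A back-of-envelope sketch, not kubelet code, of how long each probe can take to act, using the common approximation initialDelay + failureThreshold x period (it ignores the per-attempt TimeoutSeconds:30, which can stretch the worst case further):

package main

import "fmt"

type probe struct {
	name                string
	initialDelaySeconds int
	periodSeconds       int
	failureThreshold    int
}

func main() {
	// values copied from the serialized Container spec in the log entry above
	probes := []probe{
		{"liveness (HTTP GET :3000/)", 300, 30, 3},
		{"readiness (HTTP GET :3000/)", 10, 30, 3},
	}
	for _, p := range probes {
		// after the initial delay the probe must fail failureThreshold times,
		// one period apart, before the kubelet acts on the result
		worst := p.initialDelaySeconds + p.failureThreshold*p.periodSeconds
		fmt.Printf("%s: acts after at most ~%ds\n", p.name, worst)
	}
}

That gives roughly 390s for the liveness probe and 100s for readiness, which is why a container that never starts (here, ErrImagePull) is surfaced by the pod worker long before any probe would fire.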
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.961260 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.961531 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.961573 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-2fhzd"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.961617 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Jan 20 20:07:08 crc kubenswrapper[4948]: I0120 20:07:08.995361 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.018453 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g5hr\" (UniqueName: \"kubernetes.io/projected/168fa071-a608-4772-8013-f0fee67843a4-kube-api-access-4g5hr\") pod \"168fa071-a608-4772-8013-f0fee67843a4\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") "
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.018503 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-config\") pod \"168fa071-a608-4772-8013-f0fee67843a4\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") "
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.018561 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-combined-ca-bundle\") pod \"168fa071-a608-4772-8013-f0fee67843a4\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") "
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.018588 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-httpd-config\") pod \"168fa071-a608-4772-8013-f0fee67843a4\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") "
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.018674 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-ovndb-tls-certs\") pod \"168fa071-a608-4772-8013-f0fee67843a4\" (UID: \"168fa071-a608-4772-8013-f0fee67843a4\") "
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.087496 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/168fa071-a608-4772-8013-f0fee67843a4-kube-api-access-4g5hr" (OuterVolumeSpecName: "kube-api-access-4g5hr") pod "168fa071-a608-4772-8013-f0fee67843a4" (UID: "168fa071-a608-4772-8013-f0fee67843a4"). InnerVolumeSpecName "kube-api-access-4g5hr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.087606 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "168fa071-a608-4772-8013-f0fee67843a4" (UID: "168fa071-a608-4772-8013-f0fee67843a4"). InnerVolumeSpecName "httpd-config".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.129449 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.129538 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.129674 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jt9g\" (UniqueName: \"kubernetes.io/projected/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-kube-api-access-2jt9g\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.129737 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.129770 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-scripts\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.129799 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.129882 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4g5hr\" (UniqueName: \"kubernetes.io/projected/168fa071-a608-4772-8013-f0fee67843a4-kube-api-access-4g5hr\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.129897 4948 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.301301 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.301753 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.301967 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jt9g\" (UniqueName: \"kubernetes.io/projected/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-kube-api-access-2jt9g\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.302057 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.302108 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-scripts\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.302153 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.306592 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.322336 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-2nmnv"] Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.330228 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "168fa071-a608-4772-8013-f0fee67843a4" (UID: "168fa071-a608-4772-8013-f0fee67843a4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.361371 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.367321 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.374587 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jt9g\" (UniqueName: \"kubernetes.io/projected/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-kube-api-access-2jt9g\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.375435 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.431432 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.444457 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-scripts\") pod \"cinder-scheduler-0\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.456477 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.456560 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.472186 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"08d9c3660e3ecd0832afba6cf5911a8e8427e7bed01955d0e134ac074a19a3f1"} pod="openstack/horizon-67dd67cb9b-9w4wk" containerMessage="Container horizon failed startup probe, will be restarted" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.472276 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" containerID="cri-o://08d9c3660e3ecd0832afba6cf5911a8e8427e7bed01955d0e134ac074a19a3f1" gracePeriod=30 Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.545878 4948 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-config" (OuterVolumeSpecName: "config") pod "168fa071-a608-4772-8013-f0fee67843a4" (UID: "168fa071-a608-4772-8013-f0fee67843a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.547602 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.547696 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68bc7c4fc6-4mkmv"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.547848 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "168fa071-a608-4772-8013-f0fee67843a4" (UID: "168fa071-a608-4772-8013-f0fee67843a4"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.548587 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"3d0b58f79a4101a472c79a9066f937e017f54113f2910aa3d332331e863ecd0f"} pod="openstack/horizon-68bc7c4fc6-4mkmv" containerMessage="Container horizon failed startup probe, will be restarted"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.548631 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" containerID="cri-o://3d0b58f79a4101a472c79a9066f937e017f54113f2910aa3d332331e863ecd0f" gracePeriod=30
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.557288 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-pr8mc"]
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.558932 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.572422 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.588013 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-pr8mc"]
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.627815 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerName="ceilometer-notification-agent" containerID="cri-o://c7008d934d23533401eb78ae14168e519b7174e79007eb1e219bd4edca5be4ef" gracePeriod=30
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.628012 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.628197 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5656668848-wwxxb"
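Both horizon replicas above follow the same startup-probe sequence: a "Probe failed" line with the dial error, a "SyncLoop (probe)" unhealthy event, a "failed startup probe, will be restarted" message, then "Killing container with a grace period" with gracePeriod=30. A sketch, not kubelet source, of the decision that sequence implies: consecutive failures are counted, and once they reach the spec's failure threshold the container is killed and restarted. The horizon pod's actual failureThreshold is not visible in this excerpt, so 3 below is only a stand-in:

package main

import "fmt"

type probeTracker struct {
	failureThreshold int
	consecutiveFails int
}

// observe records one probe result and reports whether a restart is due.
func (t *probeTracker) observe(success bool) (restart bool) {
	if success {
		t.consecutiveFails = 0
		return false
	}
	t.consecutiveFails++
	return t.consecutiveFails >= t.failureThreshold
}

func main() {
	t := probeTracker{failureThreshold: 3} // stand-in; the real value comes from the pod spec
	results := []bool{false, false, false} // e.g. "connect: connection refused" three times
	for i, ok := range results {
		if t.observe(ok) {
			fmt.Printf("probe #%d: threshold reached, kill container (gracePeriod=30s) and restart\n", i+1)
		}
	}
}

Note that a startup-probe failure restarts only the container, not the pod: the sandbox and volumes stay in place, which is why no unmount entries follow the horizon kills here.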
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.628749 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerName="sg-core" containerID="cri-o://93552411f8e71701c6a5028894e3abda60c72e94fa54df5b8c4c0b2522393b4d" gracePeriod=30
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.631520 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5656668848-wwxxb" event={"ID":"168fa071-a608-4772-8013-f0fee67843a4","Type":"ContainerDied","Data":"8e5897fc437e203533acffdee71fddb47611dfebec0c8653e74cf221d85bd0e4"}
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.631596 4948 scope.go:117] "RemoveContainer" containerID="7124509677e848ae63f0a0e9b27eb09c2c49e5b152c91392048787b8ee7f6820"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.631924 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.634121 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.635259 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-config\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.635302 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.635356 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.635396 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.635454 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc"
Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.635493 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzx4j\" (UniqueName: \"kubernetes.io/projected/bd4c5973-d20d-4277-b4df-2438dfc641d8-kube-api-access-rzx4j\") pod
\"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.635589 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.635601 4948 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/168fa071-a608-4772-8013-f0fee67843a4-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.660282 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758247 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-scripts\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758317 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f93da57-3189-424f-952f-7731884075f8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758373 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-config\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758401 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758431 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758493 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758528 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt492\" (UniqueName: \"kubernetes.io/projected/5f93da57-3189-424f-952f-7731884075f8-kube-api-access-dt492\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758580 4948 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758627 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f93da57-3189-424f-952f-7731884075f8-logs\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758662 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758736 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758781 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzx4j\" (UniqueName: \"kubernetes.io/projected/bd4c5973-d20d-4277-b4df-2438dfc641d8-kube-api-access-rzx4j\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.758819 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data-custom\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.762816 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.762873 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.763007 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-config\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.763607 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.764207 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.796982 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5656668848-wwxxb"] Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.809192 4948 scope.go:117] "RemoveContainer" containerID="c55ffc95d603f995af1d5ccf5e770b53298103459d5435f8224252f2a6bec3ae" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.833004 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5656668848-wwxxb"] Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.844499 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzx4j\" (UniqueName: \"kubernetes.io/projected/bd4c5973-d20d-4277-b4df-2438dfc641d8-kube-api-access-rzx4j\") pod \"dnsmasq-dns-5c9776ccc5-pr8mc\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.861024 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f93da57-3189-424f-952f-7731884075f8-logs\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.861293 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.861396 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data-custom\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.861515 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-scripts\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.861600 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f93da57-3189-424f-952f-7731884075f8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.861756 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: 
\"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.861884 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt492\" (UniqueName: \"kubernetes.io/projected/5f93da57-3189-424f-952f-7731884075f8-kube-api-access-dt492\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.862665 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f93da57-3189-424f-952f-7731884075f8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.863933 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f93da57-3189-424f-952f-7731884075f8-logs\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.873470 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.874533 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-scripts\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.884503 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.884865 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data-custom\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.924274 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt492\" (UniqueName: \"kubernetes.io/projected/5f93da57-3189-424f-952f-7731884075f8-kube-api-access-dt492\") pod \"cinder-api-0\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " pod="openstack/cinder-api-0" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.941244 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:09 crc kubenswrapper[4948]: I0120 20:07:09.975953 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.028637 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6d76c4759-rj9ns"] Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.110065 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-76b984f6db-smbhz"] Jan 20 20:07:10 crc kubenswrapper[4948]: W0120 20:07:10.204936 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81ccff20_6613_42e9_a2fb_22a520b8b4cf.slice/crio-ef4bd13744820cdaf4d2ae9e6074eb557b0d849f4b7e6164a7376d20a7bab8d3 WatchSource:0}: Error finding container ef4bd13744820cdaf4d2ae9e6074eb557b0d849f4b7e6164a7376d20a7bab8d3: Status 404 returned error can't find the container with id ef4bd13744820cdaf4d2ae9e6074eb557b0d849f4b7e6164a7376d20a7bab8d3 Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.337050 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-88477f558-k4bcx"] Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.420657 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-2nmnv"] Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.597507 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="168fa071-a608-4772-8013-f0fee67843a4" path="/var/lib/kubelet/pods/168fa071-a608-4772-8013-f0fee67843a4/volumes" Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.670158 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.758152 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6d76c4759-rj9ns" event={"ID":"9b73cf57-92bd-47c5-8f21-ffcc9438594b","Type":"ContainerStarted","Data":"b915f9c4e8c1243d3c9818b223090df1a556d30f82f827ab5b6b7e9b1889fa71"} Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.759613 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-88477f558-k4bcx" event={"ID":"e71b28b0-54d9-48ce-9442-412fbdd5fe0f","Type":"ContainerStarted","Data":"003752d01ad296ec4a963d8ff5494416e4cb0f5960ea56c074d3f414cb158482"} Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.763030 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76b984f6db-smbhz" event={"ID":"81ccff20-6613-42e9-a2fb-22a520b8b4cf","Type":"ContainerStarted","Data":"ef4bd13744820cdaf4d2ae9e6074eb557b0d849f4b7e6164a7376d20a7bab8d3"} Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.764153 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" event={"ID":"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0","Type":"ContainerStarted","Data":"da882305d6a38b61b6f1bdfa0b78e258108ebd8eb4733ef6dbe30edf09b27846"} Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.765635 4948 generic.go:334] "Generic (PLEG): container finished" podID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerID="93552411f8e71701c6a5028894e3abda60c72e94fa54df5b8c4c0b2522393b4d" exitCode=2 Jan 20 20:07:10 crc kubenswrapper[4948]: I0120 20:07:10.765658 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6cf14434-5ac6-4983-8abe-7305b182c92d","Type":"ContainerDied","Data":"93552411f8e71701c6a5028894e3abda60c72e94fa54df5b8c4c0b2522393b4d"} Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 
20:07:11.066665 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-pr8mc"]
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.175440 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.802557 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76b984f6db-smbhz" event={"ID":"81ccff20-6613-42e9-a2fb-22a520b8b4cf","Type":"ContainerStarted","Data":"03f395f9b6d04ecdcec62bc88225d7c31cdae6ddb3b4a206a8dadf5906f944ee"}
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.802890 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76b984f6db-smbhz" event={"ID":"81ccff20-6613-42e9-a2fb-22a520b8b4cf","Type":"ContainerStarted","Data":"6f79b8772a40b0b359303838f32c21f3cf48f7121d6d990464ce31990f6f11f8"}
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.804781 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.804825 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-76b984f6db-smbhz"
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.822736 4948 generic.go:334] "Generic (PLEG): container finished" podID="9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" containerID="1258ee4c3ce8476bd8c4ba0b692f6fc41a64f490af07513ed001d11cd5536db4" exitCode=0
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.822949 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" event={"ID":"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0","Type":"ContainerDied","Data":"1258ee4c3ce8476bd8c4ba0b692f6fc41a64f490af07513ed001d11cd5536db4"}
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.843175 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-76b984f6db-smbhz" podStartSLOduration=5.84315307 podStartE2EDuration="5.84315307s" podCreationTimestamp="2026-01-20 20:07:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:11.827796626 +0000 UTC m=+1059.778521595" watchObservedRunningTime="2026-01-20 20:07:11.84315307 +0000 UTC m=+1059.793878039"
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.844005 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" event={"ID":"bd4c5973-d20d-4277-b4df-2438dfc641d8","Type":"ContainerStarted","Data":"d9ae499fc2569925d4383a1af600720a02165aed2618c77c12ec33dbb9c0e9a7"}
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.845065 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f93da57-3189-424f-952f-7731884075f8","Type":"ContainerStarted","Data":"ad1c8c77529fe0fe17a1db2b1fee753e1cb7884e58531c9dda96fc4bbb08ffb3"}
Jan 20 20:07:11 crc kubenswrapper[4948]: I0120 20:07:11.846652 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85","Type":"ContainerStarted","Data":"289b36c4a41addf13f3c3b05deb5126a5d29409d10243daad58554241dd082a5"}
Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.683176 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-2nmnv"
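The pod_startup_latency_tracker entry above records barbican-api reaching Running 5.84315307s after its creation timestamp, and that duration can be reproduced from the two printed timestamps alone. A small sketch, not kubelet code, that parses them with the Go standard library; the only wrinkle is stripping the monotonic-clock suffix ("m=+...") that Go's time.Time String() appends:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	created := "2026-01-20 20:07:06 +0000 UTC"
	watched := "2026-01-20 20:07:11.84315307 +0000 UTC m=+1059.793878039"

	// strip the monotonic-clock reading before parsing the wall-clock part
	watched, _, _ = strings.Cut(watched, " m=")

	// time.Parse accepts extra fractional seconds even though the layout has none
	const layout = "2006-01-02 15:04:05 -0700 MST"
	t0, err := time.Parse(layout, created)
	if err != nil {
		panic(err)
	}
	t1, err := time.Parse(layout, watched)
	if err != nil {
		panic(err)
	}
	fmt.Println(t1.Sub(t0)) // 5.84315307s, matching podStartSLOduration
}

The firstStartedPulling/lastFinishedPulling fields are the zero time here because the images were already present, so no pull interval is subtracted from the SLO duration.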
Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.714425 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-nb\") pod \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.714832 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-svc\") pod \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.714870 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-config\") pod \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.715032 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-swift-storage-0\") pod \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.715053 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzs92\" (UniqueName: \"kubernetes.io/projected/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-kube-api-access-gzs92\") pod \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.715100 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-sb\") pod \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\" (UID: \"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0\") " Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.752810 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-kube-api-access-gzs92" (OuterVolumeSpecName: "kube-api-access-gzs92") pod "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" (UID: "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0"). InnerVolumeSpecName "kube-api-access-gzs92". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.766443 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" (UID: "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.823415 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.823445 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzs92\" (UniqueName: \"kubernetes.io/projected/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-kube-api-access-gzs92\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.824349 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" (UID: "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.838742 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" (UID: "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.885733 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" event={"ID":"9faf890e-ed96-4eb5-9030-0cdbbb5de4e0","Type":"ContainerDied","Data":"da882305d6a38b61b6f1bdfa0b78e258108ebd8eb4733ef6dbe30edf09b27846"} Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.885790 4948 scope.go:117] "RemoveContainer" containerID="1258ee4c3ce8476bd8c4ba0b692f6fc41a64f490af07513ed001d11cd5536db4" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.885931 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-2nmnv" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.891946 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" (UID: "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.892187 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-config" (OuterVolumeSpecName: "config") pod "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" (UID: "9faf890e-ed96-4eb5-9030-0cdbbb5de4e0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.893377 4948 generic.go:334] "Generic (PLEG): container finished" podID="bd4c5973-d20d-4277-b4df-2438dfc641d8" containerID="2350ed0189e540bfad2705253dc5a355eb4fa3176ce9891e477ee8d3198026ed" exitCode=0 Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.894652 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" event={"ID":"bd4c5973-d20d-4277-b4df-2438dfc641d8","Type":"ContainerDied","Data":"2350ed0189e540bfad2705253dc5a355eb4fa3176ce9891e477ee8d3198026ed"} Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.929403 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.929456 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.929466 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:12 crc kubenswrapper[4948]: I0120 20:07:12.929475 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:13 crc kubenswrapper[4948]: I0120 20:07:13.408524 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-2nmnv"] Jan 20 20:07:13 crc kubenswrapper[4948]: I0120 20:07:13.444339 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-2nmnv"] Jan 20 20:07:13 crc kubenswrapper[4948]: I0120 20:07:13.980130 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f93da57-3189-424f-952f-7731884075f8","Type":"ContainerStarted","Data":"d66f639b5e1eaf715bbec8f3da02dc2437de7bf931f7a254d8fe5fd07294c985"} Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.184729 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.584106 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" path="/var/lib/kubelet/pods/9faf890e-ed96-4eb5-9030-0cdbbb5de4e0/volumes" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.928378 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-869694d5d6-n6ftn"] Jan 20 20:07:14 crc kubenswrapper[4948]: E0120 20:07:14.928888 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" containerName="init" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.928908 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" containerName="init" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.929089 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9faf890e-ed96-4eb5-9030-0cdbbb5de4e0" containerName="init" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.930034 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.934113 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.934281 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.947528 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-869694d5d6-n6ftn"] Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.954461 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-config-data\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.954525 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftgjk\" (UniqueName: \"kubernetes.io/projected/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-kube-api-access-ftgjk\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.954578 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-logs\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.954658 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-config-data-custom\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.954683 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-public-tls-certs\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.954791 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-internal-tls-certs\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:14 crc kubenswrapper[4948]: I0120 20:07:14.954816 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-combined-ca-bundle\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.003397 4948 generic.go:334] "Generic (PLEG): 
container finished" podID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerID="c7008d934d23533401eb78ae14168e519b7174e79007eb1e219bd4edca5be4ef" exitCode=0 Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.003466 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6cf14434-5ac6-4983-8abe-7305b182c92d","Type":"ContainerDied","Data":"c7008d934d23533401eb78ae14168e519b7174e79007eb1e219bd4edca5be4ef"} Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.014812 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" event={"ID":"bd4c5973-d20d-4277-b4df-2438dfc641d8","Type":"ContainerStarted","Data":"ecf9a5fe437d4ecf14d06208938a593d4105c0583511fd482e857bc588faac44"} Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.015919 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.027995 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85","Type":"ContainerStarted","Data":"fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094"} Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.048618 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" podStartSLOduration=6.048591052 podStartE2EDuration="6.048591052s" podCreationTimestamp="2026-01-20 20:07:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:15.046457422 +0000 UTC m=+1062.997182391" watchObservedRunningTime="2026-01-20 20:07:15.048591052 +0000 UTC m=+1062.999316031" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.057692 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-config-data\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.057752 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftgjk\" (UniqueName: \"kubernetes.io/projected/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-kube-api-access-ftgjk\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.057783 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-logs\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.057853 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-config-data-custom\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.057875 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-public-tls-certs\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.057919 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-internal-tls-certs\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.057941 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-combined-ca-bundle\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.059199 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-logs\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.066688 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-combined-ca-bundle\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.084827 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftgjk\" (UniqueName: \"kubernetes.io/projected/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-kube-api-access-ftgjk\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.084996 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-config-data-custom\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.085129 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-internal-tls-certs\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.086062 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-config-data\") pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.088965 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7eca20c7-5485-4fce-9c6e-d3bd3943adc1-public-tls-certs\") 
pod \"barbican-api-869694d5d6-n6ftn\" (UID: \"7eca20c7-5485-4fce-9c6e-d3bd3943adc1\") " pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.247939 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.300386 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:07:15 crc kubenswrapper[4948]: I0120 20:07:15.303036 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6965b8b8b4-5f4wt" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.431576 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.465089 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-log-httpd\") pod \"6cf14434-5ac6-4983-8abe-7305b182c92d\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.465163 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-run-httpd\") pod \"6cf14434-5ac6-4983-8abe-7305b182c92d\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.465241 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-combined-ca-bundle\") pod \"6cf14434-5ac6-4983-8abe-7305b182c92d\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.465296 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4qf6\" (UniqueName: \"kubernetes.io/projected/6cf14434-5ac6-4983-8abe-7305b182c92d-kube-api-access-q4qf6\") pod \"6cf14434-5ac6-4983-8abe-7305b182c92d\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.465344 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-sg-core-conf-yaml\") pod \"6cf14434-5ac6-4983-8abe-7305b182c92d\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.465396 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-scripts\") pod \"6cf14434-5ac6-4983-8abe-7305b182c92d\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.465495 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-config-data\") pod \"6cf14434-5ac6-4983-8abe-7305b182c92d\" (UID: \"6cf14434-5ac6-4983-8abe-7305b182c92d\") " Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.466907 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-log-httpd" (OuterVolumeSpecName: "log-httpd") 
pod "6cf14434-5ac6-4983-8abe-7305b182c92d" (UID: "6cf14434-5ac6-4983-8abe-7305b182c92d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.467369 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6cf14434-5ac6-4983-8abe-7305b182c92d" (UID: "6cf14434-5ac6-4983-8abe-7305b182c92d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.502653 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cf14434-5ac6-4983-8abe-7305b182c92d-kube-api-access-q4qf6" (OuterVolumeSpecName: "kube-api-access-q4qf6") pod "6cf14434-5ac6-4983-8abe-7305b182c92d" (UID: "6cf14434-5ac6-4983-8abe-7305b182c92d"). InnerVolumeSpecName "kube-api-access-q4qf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.558425 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-scripts" (OuterVolumeSpecName: "scripts") pod "6cf14434-5ac6-4983-8abe-7305b182c92d" (UID: "6cf14434-5ac6-4983-8abe-7305b182c92d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.569004 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-869694d5d6-n6ftn"] Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.569271 4948 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.569311 4948 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6cf14434-5ac6-4983-8abe-7305b182c92d-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.569324 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4qf6\" (UniqueName: \"kubernetes.io/projected/6cf14434-5ac6-4983-8abe-7305b182c92d-kube-api-access-q4qf6\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.569340 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.598792 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6cf14434-5ac6-4983-8abe-7305b182c92d" (UID: "6cf14434-5ac6-4983-8abe-7305b182c92d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.606274 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-config-data" (OuterVolumeSpecName: "config-data") pod "6cf14434-5ac6-4983-8abe-7305b182c92d" (UID: "6cf14434-5ac6-4983-8abe-7305b182c92d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.615867 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6cf14434-5ac6-4983-8abe-7305b182c92d" (UID: "6cf14434-5ac6-4983-8abe-7305b182c92d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:17 crc kubenswrapper[4948]: W0120 20:07:17.664221 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7eca20c7_5485_4fce_9c6e_d3bd3943adc1.slice/crio-a35d110ff44825c9c2cdb3c3660803f38154f72584f9e282a7e89673cbd88815 WatchSource:0}: Error finding container a35d110ff44825c9c2cdb3c3660803f38154f72584f9e282a7e89673cbd88815: Status 404 returned error can't find the container with id a35d110ff44825c9c2cdb3c3660803f38154f72584f9e282a7e89673cbd88815 Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.676632 4948 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.676674 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:17 crc kubenswrapper[4948]: I0120 20:07:17.676684 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cf14434-5ac6-4983-8abe-7305b182c92d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.196824 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6cf14434-5ac6-4983-8abe-7305b182c92d","Type":"ContainerDied","Data":"a44d30b75b642fc8df3424a754bafd81309f5f693cb36cc33a8d40e6be64690a"} Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.197136 4948 scope.go:117] "RemoveContainer" containerID="93552411f8e71701c6a5028894e3abda60c72e94fa54df5b8c4c0b2522393b4d" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.197368 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.209194 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-869694d5d6-n6ftn" event={"ID":"7eca20c7-5485-4fce-9c6e-d3bd3943adc1","Type":"ContainerStarted","Data":"a35d110ff44825c9c2cdb3c3660803f38154f72584f9e282a7e89673cbd88815"} Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.226093 4948 scope.go:117] "RemoveContainer" containerID="c7008d934d23533401eb78ae14168e519b7174e79007eb1e219bd4edca5be4ef" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.297078 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.322457 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.327627 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:18 crc kubenswrapper[4948]: E0120 20:07:18.328597 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerName="ceilometer-notification-agent" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.328679 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerName="ceilometer-notification-agent" Jan 20 20:07:18 crc kubenswrapper[4948]: E0120 20:07:18.328764 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerName="sg-core" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.328851 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerName="sg-core" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.329131 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerName="sg-core" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.329205 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" containerName="ceilometer-notification-agent" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.331098 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.333719 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-scripts\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.333795 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.333819 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-log-httpd\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.333860 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-run-httpd\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.333895 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-config-data\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.333930 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc9wf\" (UniqueName: \"kubernetes.io/projected/d51108ae-667c-4f4f-9f7b-99c96c573cca-kube-api-access-tc9wf\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.333946 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.346736 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.346966 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.394032 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.436646 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-config-data\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.437914 
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.437914 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc9wf\" (UniqueName: \"kubernetes.io/projected/d51108ae-667c-4f4f-9f7b-99c96c573cca-kube-api-access-tc9wf\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.437953 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.438021 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-scripts\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.438093 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.438122 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-log-httpd\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.438177 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-run-httpd\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.438621 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-run-httpd\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.440911 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-log-httpd\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.446268 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-scripts\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.446945 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0"
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.447727 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-config-data\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.457815 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tc9wf\" (UniqueName: \"kubernetes.io/projected/d51108ae-667c-4f4f-9f7b-99c96c573cca-kube-api-access-tc9wf\") pod \"ceilometer-0\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " pod="openstack/ceilometer-0" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.584545 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cf14434-5ac6-4983-8abe-7305b182c92d" path="/var/lib/kubelet/pods/6cf14434-5ac6-4983-8abe-7305b182c92d/volumes" Jan 20 20:07:18 crc kubenswrapper[4948]: I0120 20:07:18.680537 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.258265 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-88477f558-k4bcx" event={"ID":"e71b28b0-54d9-48ce-9442-412fbdd5fe0f","Type":"ContainerStarted","Data":"1f5051e8ef8e2de4b916c56dae7cdb1822621c3da16dcba391428453af9a1190"} Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.280345 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-869694d5d6-n6ftn" event={"ID":"7eca20c7-5485-4fce-9c6e-d3bd3943adc1","Type":"ContainerStarted","Data":"8f86295fde55a04aa631a1f24057bc8873c1e891df2cc280daaab53e9bd1d8a8"} Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.315797 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85","Type":"ContainerStarted","Data":"0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d"} Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.356270 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=9.692011012 podStartE2EDuration="11.356254123s" podCreationTimestamp="2026-01-20 20:07:08 +0000 UTC" firstStartedPulling="2026-01-20 20:07:10.842661938 +0000 UTC m=+1058.793386897" lastFinishedPulling="2026-01-20 20:07:12.506905039 +0000 UTC m=+1060.457630008" observedRunningTime="2026-01-20 20:07:19.353389832 +0000 UTC m=+1067.304114801" watchObservedRunningTime="2026-01-20 20:07:19.356254123 +0000 UTC m=+1067.306979092" Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.362187 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api-log" containerID="cri-o://d66f639b5e1eaf715bbec8f3da02dc2437de7bf931f7a254d8fe5fd07294c985" gracePeriod=30 Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.362317 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api" 
containerID="cri-o://bd0057d43e437d4afecf99dbbfc5f55d1385b8784e2201192d21bf290177e9e0" gracePeriod=30 Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.362731 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f93da57-3189-424f-952f-7731884075f8","Type":"ContainerStarted","Data":"bd0057d43e437d4afecf99dbbfc5f55d1385b8784e2201192d21bf290177e9e0"} Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.362806 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.373661 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6d76c4759-rj9ns" event={"ID":"9b73cf57-92bd-47c5-8f21-ffcc9438594b","Type":"ContainerStarted","Data":"3a1df854645cd812a8a11facedb94727beffcd220aefb6efc2c80aa02cb2b3fd"} Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.404452 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=10.404427866 podStartE2EDuration="10.404427866s" podCreationTimestamp="2026-01-20 20:07:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:19.399113875 +0000 UTC m=+1067.349838844" watchObservedRunningTime="2026-01-20 20:07:19.404427866 +0000 UTC m=+1067.355152835" Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.506360 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:19 crc kubenswrapper[4948]: W0120 20:07:19.562595 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd51108ae_667c_4f4f_9f7b_99c96c573cca.slice/crio-0685e146920bdd9e668bce3a6d342ffe128b5919d37a71570f5ed34c25ee9695 WatchSource:0}: Error finding container 0685e146920bdd9e668bce3a6d342ffe128b5919d37a71570f5ed34c25ee9695: Status 404 returned error can't find the container with id 0685e146920bdd9e668bce3a6d342ffe128b5919d37a71570f5ed34c25ee9695 Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.573427 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.576860 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.162:8080/\": dial tcp 10.217.0.162:8080: connect: connection refused" Jan 20 20:07:19 crc kubenswrapper[4948]: I0120 20:07:19.942930 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.031184 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qvbf9"] Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.031429 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" podUID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerName="dnsmasq-dns" containerID="cri-o://7f7e235466d04e56bb30af71494aca05f50c25feea4f98a3876fbdb6429db220" gracePeriod=10 Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.254629 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon 
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.254629 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.254958 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" podUID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.151:5353: connect: connection refused"
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.254700 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.423009 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerStarted","Data":"0685e146920bdd9e668bce3a6d342ffe128b5919d37a71570f5ed34c25ee9695"}
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.425987 4948 generic.go:334] "Generic (PLEG): container finished" podID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerID="7f7e235466d04e56bb30af71494aca05f50c25feea4f98a3876fbdb6429db220" exitCode=0
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.426044 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" event={"ID":"40932965-aaf9-44be-8d0e-23a7cba8f60a","Type":"ContainerDied","Data":"7f7e235466d04e56bb30af71494aca05f50c25feea4f98a3876fbdb6429db220"}
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.427831 4948 generic.go:334] "Generic (PLEG): container finished" podID="5f93da57-3189-424f-952f-7731884075f8" containerID="d66f639b5e1eaf715bbec8f3da02dc2437de7bf931f7a254d8fe5fd07294c985" exitCode=143
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.427884 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f93da57-3189-424f-952f-7731884075f8","Type":"ContainerDied","Data":"d66f639b5e1eaf715bbec8f3da02dc2437de7bf931f7a254d8fe5fd07294c985"}
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.429249 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6d76c4759-rj9ns" event={"ID":"9b73cf57-92bd-47c5-8f21-ffcc9438594b","Type":"ContainerStarted","Data":"63b53d84a9112398e000c8232c54a347c83c36af1c971cd8396071cfd9dc13ba"}
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.431976 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-88477f558-k4bcx" event={"ID":"e71b28b0-54d9-48ce-9442-412fbdd5fe0f","Type":"ContainerStarted","Data":"313fc5f9f36fb9318f521ae9abead4b533a322c5aa0ddeef02a250830d18c8f5"}
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.439562 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-869694d5d6-n6ftn" event={"ID":"7eca20c7-5485-4fce-9c6e-d3bd3943adc1","Type":"ContainerStarted","Data":"a39484126ae1efa2552bad2290a2541688d9ebb4424345ca5db636fd12315c19"}
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.439823 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-869694d5d6-n6ftn"
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.439913 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-869694d5d6-n6ftn"
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.491274 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6d76c4759-rj9ns" podStartSLOduration=8.169820193 podStartE2EDuration="14.491255349s" podCreationTimestamp="2026-01-20 20:07:06 +0000 UTC" firstStartedPulling="2026-01-20 20:07:10.198511033 +0000 UTC m=+1058.149236002" lastFinishedPulling="2026-01-20 20:07:16.519946189 +0000 UTC m=+1064.470671158" observedRunningTime="2026-01-20 20:07:20.473317912 +0000 UTC m=+1068.424042881" watchObservedRunningTime="2026-01-20 20:07:20.491255349 +0000 UTC m=+1068.441980318"
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.577112 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-869694d5d6-n6ftn" podStartSLOduration=6.577091216 podStartE2EDuration="6.577091216s" podCreationTimestamp="2026-01-20 20:07:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:20.574316208 +0000 UTC m=+1068.525041177" watchObservedRunningTime="2026-01-20 20:07:20.577091216 +0000 UTC m=+1068.527816185"
Jan 20 20:07:20 crc kubenswrapper[4948]: I0120 20:07:20.583785 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-88477f558-k4bcx" podStartSLOduration=8.459578357 podStartE2EDuration="14.583764745s" podCreationTimestamp="2026-01-20 20:07:06 +0000 UTC" firstStartedPulling="2026-01-20 20:07:10.407182854 +0000 UTC m=+1058.357907823" lastFinishedPulling="2026-01-20 20:07:16.531369242 +0000 UTC m=+1064.482094211" observedRunningTime="2026-01-20 20:07:20.52702366 +0000 UTC m=+1068.477748629" watchObservedRunningTime="2026-01-20 20:07:20.583764745 +0000 UTC m=+1068.534489714"
Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.255274 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.255814 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.409043 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-sb\") pod \"40932965-aaf9-44be-8d0e-23a7cba8f60a\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.409175 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-nb\") pod \"40932965-aaf9-44be-8d0e-23a7cba8f60a\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.409196 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-swift-storage-0\") pod \"40932965-aaf9-44be-8d0e-23a7cba8f60a\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.409222 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-svc\") pod \"40932965-aaf9-44be-8d0e-23a7cba8f60a\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.409257 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-config\") pod \"40932965-aaf9-44be-8d0e-23a7cba8f60a\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.409281 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q95jl\" (UniqueName: \"kubernetes.io/projected/40932965-aaf9-44be-8d0e-23a7cba8f60a-kube-api-access-q95jl\") pod \"40932965-aaf9-44be-8d0e-23a7cba8f60a\" (UID: \"40932965-aaf9-44be-8d0e-23a7cba8f60a\") " Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.491901 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40932965-aaf9-44be-8d0e-23a7cba8f60a-kube-api-access-q95jl" (OuterVolumeSpecName: "kube-api-access-q95jl") pod "40932965-aaf9-44be-8d0e-23a7cba8f60a" (UID: "40932965-aaf9-44be-8d0e-23a7cba8f60a"). InnerVolumeSpecName "kube-api-access-q95jl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.512022 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q95jl\" (UniqueName: \"kubernetes.io/projected/40932965-aaf9-44be-8d0e-23a7cba8f60a-kube-api-access-q95jl\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.527523 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerStarted","Data":"967941366e604b4d950bf3d9619707dd25f4eaaa548c6ced7375fadc22974fc6"} Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.530647 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.531330 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-qvbf9" event={"ID":"40932965-aaf9-44be-8d0e-23a7cba8f60a","Type":"ContainerDied","Data":"6c2186b11676105a97b7c5433ddbb1b6b055f8bd023af00fb3e110e43e945db6"} Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.531390 4948 scope.go:117] "RemoveContainer" containerID="7f7e235466d04e56bb30af71494aca05f50c25feea4f98a3876fbdb6429db220" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.647128 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "40932965-aaf9-44be-8d0e-23a7cba8f60a" (UID: "40932965-aaf9-44be-8d0e-23a7cba8f60a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.686411 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "40932965-aaf9-44be-8d0e-23a7cba8f60a" (UID: "40932965-aaf9-44be-8d0e-23a7cba8f60a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.692721 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-config" (OuterVolumeSpecName: "config") pod "40932965-aaf9-44be-8d0e-23a7cba8f60a" (UID: "40932965-aaf9-44be-8d0e-23a7cba8f60a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.703497 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "40932965-aaf9-44be-8d0e-23a7cba8f60a" (UID: "40932965-aaf9-44be-8d0e-23a7cba8f60a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.729254 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.749147 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.749509 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.749614 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.778257 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "40932965-aaf9-44be-8d0e-23a7cba8f60a" (UID: "40932965-aaf9-44be-8d0e-23a7cba8f60a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.784478 4948 scope.go:117] "RemoveContainer" containerID="d592504d8c0a6f9a38e08f7fe6cb01a68ac263f89b75bd519dd5859a5418ae56" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.881352 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40932965-aaf9-44be-8d0e-23a7cba8f60a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.935042 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qvbf9"] Jan 20 20:07:21 crc kubenswrapper[4948]: I0120 20:07:21.972576 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-qvbf9"] Jan 20 20:07:22 crc kubenswrapper[4948]: I0120 20:07:22.260882 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:07:22 crc kubenswrapper[4948]: I0120 20:07:22.260882 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:07:22 crc kubenswrapper[4948]: I0120 20:07:22.543264 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerStarted","Data":"cf7ffd612025ead678392921343d34c52b2036b6245ddd684837d138126544f9"} Jan 20 20:07:22 crc kubenswrapper[4948]: I0120 20:07:22.582044 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="40932965-aaf9-44be-8d0e-23a7cba8f60a" path="/var/lib/kubelet/pods/40932965-aaf9-44be-8d0e-23a7cba8f60a/volumes" Jan 20 20:07:23 crc kubenswrapper[4948]: I0120 20:07:23.704508 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerStarted","Data":"56f79db8b2d0ba9877ee75f5fb6727f5e0c0c6d653fad44bf2b97a23f46d95c4"} Jan 20 20:07:24 crc kubenswrapper[4948]: I0120 20:07:24.574417 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.162:8080/\": dial tcp 10.217.0.162:8080: connect: connection refused" Jan 20 20:07:26 crc kubenswrapper[4948]: I0120 20:07:26.337904 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:07:26 crc kubenswrapper[4948]: I0120 20:07:26.337904 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:07:26 crc kubenswrapper[4948]: I0120 20:07:26.733324 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:26 crc kubenswrapper[4948]: I0120 20:07:26.783694 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerStarted","Data":"c841b5fa069dfe5a6fb9a7bfd4a789f0ae4ffbaab7e5270f29a883038b3d172f"} Jan 20 20:07:26 crc kubenswrapper[4948]: I0120 20:07:26.784927 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 20:07:26 crc kubenswrapper[4948]: I0120 20:07:26.834167 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.55673186 podStartE2EDuration="8.834140471s" podCreationTimestamp="2026-01-20 20:07:18 +0000 UTC" firstStartedPulling="2026-01-20 20:07:19.565218162 +0000 UTC m=+1067.515943131" lastFinishedPulling="2026-01-20 20:07:25.842626773 +0000 UTC m=+1073.793351742" observedRunningTime="2026-01-20 20:07:26.816206434 +0000 UTC m=+1074.766931403" watchObservedRunningTime="2026-01-20 20:07:26.834140471 +0000 UTC m=+1074.784865440" Jan 20 20:07:27 crc kubenswrapper[4948]: I0120 20:07:27.304033 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:07:27 crc kubenswrapper[4948]: I0120 20:07:27.329979 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:28 crc kubenswrapper[4948]: I0120 20:07:28.535141 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:29 crc kubenswrapper[4948]: I0120 20:07:29.260924 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-869694d5d6-n6ftn" podUID="7eca20c7-5485-4fce-9c6e-d3bd3943adc1" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.165:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:07:29 crc kubenswrapper[4948]: I0120 20:07:29.704324 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7c45b45594-rdsj9" Jan 20 20:07:30 crc kubenswrapper[4948]: I0120 20:07:30.050359 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 20 20:07:30 crc kubenswrapper[4948]: I0120 20:07:30.117904 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.164:8776/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:07:30 crc kubenswrapper[4948]: I0120 20:07:30.119302 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 20:07:30 crc kubenswrapper[4948]: I0120 20:07:30.252948 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-869694d5d6-n6ftn" podUID="7eca20c7-5485-4fce-9c6e-d3bd3943adc1" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.165:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:07:30 crc kubenswrapper[4948]: I0120 20:07:30.818736 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="cinder-scheduler" containerID="cri-o://fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094" gracePeriod=30 Jan 20 20:07:30 crc kubenswrapper[4948]: I0120 20:07:30.819294 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="probe" containerID="cri-o://0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d" gracePeriod=30 Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.803956 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 20 20:07:31 crc kubenswrapper[4948]: E0120 20:07:31.804361 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerName="init" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.804381 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerName="init" Jan 20 20:07:31 crc kubenswrapper[4948]: E0120 20:07:31.804402 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerName="dnsmasq-dns" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.804409 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerName="dnsmasq-dns" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.804570 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="40932965-aaf9-44be-8d0e-23a7cba8f60a" containerName="dnsmasq-dns" Jan 20 20:07:31 crc 
kubenswrapper[4948]: I0120 20:07:31.805177 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.808281 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.808521 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-pqddk" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.809350 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.818686 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.877597 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftksk\" (UniqueName: \"kubernetes.io/projected/d1222f27-af2a-46fd-a296-37bdb8db4486-kube-api-access-ftksk\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.877677 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1222f27-af2a-46fd-a296-37bdb8db4486-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.877737 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d1222f27-af2a-46fd-a296-37bdb8db4486-openstack-config\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.877761 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d1222f27-af2a-46fd-a296-37bdb8db4486-openstack-config-secret\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.980555 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1222f27-af2a-46fd-a296-37bdb8db4486-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.980628 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d1222f27-af2a-46fd-a296-37bdb8db4486-openstack-config\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.980656 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d1222f27-af2a-46fd-a296-37bdb8db4486-openstack-config-secret\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 
20:07:31.980782 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftksk\" (UniqueName: \"kubernetes.io/projected/d1222f27-af2a-46fd-a296-37bdb8db4486-kube-api-access-ftksk\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.981572 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d1222f27-af2a-46fd-a296-37bdb8db4486-openstack-config\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.988294 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1222f27-af2a-46fd-a296-37bdb8db4486-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:31 crc kubenswrapper[4948]: I0120 20:07:31.988861 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d1222f27-af2a-46fd-a296-37bdb8db4486-openstack-config-secret\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:32 crc kubenswrapper[4948]: I0120 20:07:32.017369 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftksk\" (UniqueName: \"kubernetes.io/projected/d1222f27-af2a-46fd-a296-37bdb8db4486-kube-api-access-ftksk\") pod \"openstackclient\" (UID: \"d1222f27-af2a-46fd-a296-37bdb8db4486\") " pod="openstack/openstackclient" Jan 20 20:07:32 crc kubenswrapper[4948]: I0120 20:07:32.125129 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 20 20:07:32 crc kubenswrapper[4948]: I0120 20:07:32.821908 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 20 20:07:32 crc kubenswrapper[4948]: I0120 20:07:32.863117 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d1222f27-af2a-46fd-a296-37bdb8db4486","Type":"ContainerStarted","Data":"130b48c49ae8b28f12347977c807df57e38a879f7a8e8fe24622624599d7ac6c"} Jan 20 20:07:32 crc kubenswrapper[4948]: I0120 20:07:32.865158 4948 generic.go:334] "Generic (PLEG): container finished" podID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerID="0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d" exitCode=0 Jan 20 20:07:32 crc kubenswrapper[4948]: I0120 20:07:32.865210 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85","Type":"ContainerDied","Data":"0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d"} Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.676638 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.837274 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data\") pod \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.837369 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-scripts\") pod \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.837395 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data-custom\") pod \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.837432 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-combined-ca-bundle\") pod \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.837461 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-etc-machine-id\") pod \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.837486 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jt9g\" (UniqueName: \"kubernetes.io/projected/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-kube-api-access-2jt9g\") pod \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.842888 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" (UID: "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.846007 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" (UID: "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.855941 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-scripts" (OuterVolumeSpecName: "scripts") pod "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" (UID: "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.856941 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-kube-api-access-2jt9g" (OuterVolumeSpecName: "kube-api-access-2jt9g") pod "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" (UID: "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85"). InnerVolumeSpecName "kube-api-access-2jt9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.944250 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.944278 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jt9g\" (UniqueName: \"kubernetes.io/projected/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-kube-api-access-2jt9g\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.944293 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.944302 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.982511 4948 generic.go:334] "Generic (PLEG): container finished" podID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerID="fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094" exitCode=0 Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.982568 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85","Type":"ContainerDied","Data":"fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094"} Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.982602 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85","Type":"ContainerDied","Data":"289b36c4a41addf13f3c3b05deb5126a5d29409d10243daad58554241dd082a5"} Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.982636 4948 scope.go:117] "RemoveContainer" containerID="0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d" Jan 20 20:07:33 crc kubenswrapper[4948]: I0120 20:07:33.982640 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.046863 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" (UID: "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.047299 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-combined-ca-bundle\") pod \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\" (UID: \"9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85\") " Jan 20 20:07:34 crc kubenswrapper[4948]: W0120 20:07:34.047820 4948 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85/volumes/kubernetes.io~secret/combined-ca-bundle Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.047839 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" (UID: "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.127215 4948 scope.go:117] "RemoveContainer" containerID="fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.151838 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.156205 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data" (OuterVolumeSpecName: "config-data") pod "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" (UID: "9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.254039 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.264535 4948 scope.go:117] "RemoveContainer" containerID="0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d" Jan 20 20:07:34 crc kubenswrapper[4948]: E0120 20:07:34.274081 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d\": container with ID starting with 0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d not found: ID does not exist" containerID="0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.274130 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d"} err="failed to get container status \"0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d\": rpc error: code = NotFound desc = could not find container \"0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d\": container with ID starting with 0496c67fb71b039b4d257c61db4d07342a3bf0d95030a70fde15fcce95cb0c8d not found: ID does not exist" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.274156 4948 scope.go:117] "RemoveContainer" containerID="fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094" Jan 20 20:07:34 crc kubenswrapper[4948]: E0120 20:07:34.275227 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094\": container with ID starting with fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094 not found: ID does not exist" containerID="fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.275262 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094"} err="failed to get container status \"fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094\": rpc error: code = NotFound desc = could not find container \"fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094\": container with ID starting with fd81757ecb755a7fd09377f0c65d0771b3f42f40851defb581a39d733f224094 not found: ID does not exist" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.318456 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.326925 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.351204 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 20:07:34 crc kubenswrapper[4948]: E0120 20:07:34.351655 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="cinder-scheduler" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.351671 4948 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="cinder-scheduler" Jan 20 20:07:34 crc kubenswrapper[4948]: E0120 20:07:34.351688 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="probe" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.351694 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="probe" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.351955 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="cinder-scheduler" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.351986 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" containerName="probe" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.352951 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.359245 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e95290f6-0498-4bfa-8653-3a53edf4f01f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.359302 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tknsh\" (UniqueName: \"kubernetes.io/projected/e95290f6-0498-4bfa-8653-3a53edf4f01f-kube-api-access-tknsh\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.359387 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-scripts\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.359404 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.359421 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-config-data\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.359469 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.360177 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 20 20:07:34 
crc kubenswrapper[4948]: I0120 20:07:34.380440 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.461173 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-scripts\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.461222 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.461246 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-config-data\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.461301 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.461393 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e95290f6-0498-4bfa-8653-3a53edf4f01f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.461431 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tknsh\" (UniqueName: \"kubernetes.io/projected/e95290f6-0498-4bfa-8653-3a53edf4f01f-kube-api-access-tknsh\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.462408 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e95290f6-0498-4bfa-8653-3a53edf4f01f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.467752 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.468127 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.482036 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-scripts\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.483589 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e95290f6-0498-4bfa-8653-3a53edf4f01f-config-data\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.495089 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tknsh\" (UniqueName: \"kubernetes.io/projected/e95290f6-0498-4bfa-8653-3a53edf4f01f-kube-api-access-tknsh\") pod \"cinder-scheduler-0\" (UID: \"e95290f6-0498-4bfa-8653-3a53edf4f01f\") " pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.510435 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.584930 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85" path="/var/lib/kubelet/pods/9ea549ff-6ceb-4ed8-b6fe-3ac7ebaabe85/volumes" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.681028 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.863357 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-869694d5d6-n6ftn" Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.940594 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-76b984f6db-smbhz"] Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.940888 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" containerID="cri-o://6f79b8772a40b0b359303838f32c21f3cf48f7121d6d990464ce31990f6f11f8" gracePeriod=30 Jan 20 20:07:34 crc kubenswrapper[4948]: I0120 20:07:34.941037 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api" containerID="cri-o://03f395f9b6d04ecdcec62bc88225d7c31cdae6ddb3b4a206a8dadf5906f944ee" gracePeriod=30 Jan 20 20:07:35 crc kubenswrapper[4948]: I0120 20:07:35.332649 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 20 20:07:36 crc kubenswrapper[4948]: I0120 20:07:36.074200 4948 generic.go:334] "Generic (PLEG): container finished" podID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerID="6f79b8772a40b0b359303838f32c21f3cf48f7121d6d990464ce31990f6f11f8" exitCode=143 Jan 20 20:07:36 crc kubenswrapper[4948]: I0120 20:07:36.074504 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76b984f6db-smbhz" event={"ID":"81ccff20-6613-42e9-a2fb-22a520b8b4cf","Type":"ContainerDied","Data":"6f79b8772a40b0b359303838f32c21f3cf48f7121d6d990464ce31990f6f11f8"} Jan 20 20:07:36 crc kubenswrapper[4948]: I0120 20:07:36.076006 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"e95290f6-0498-4bfa-8653-3a53edf4f01f","Type":"ContainerStarted","Data":"fd76f93838fa98ebf5f7b0e1c5a84b9a5f7a292c971615e76ad3c9323f4bfd3d"} Jan 20 20:07:37 crc kubenswrapper[4948]: I0120 20:07:37.117955 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e95290f6-0498-4bfa-8653-3a53edf4f01f","Type":"ContainerStarted","Data":"520a8f170a5da0db79c5d4533878e2c174af3ce3406fa012f7c2f6b7f85fd8c3"} Jan 20 20:07:38 crc kubenswrapper[4948]: I0120 20:07:38.143757 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e95290f6-0498-4bfa-8653-3a53edf4f01f","Type":"ContainerStarted","Data":"8c7d9c936dc0ef37fc2f3d03a8aad17565d1ccfb6fc143c6b88a528c6a028ebd"} Jan 20 20:07:38 crc kubenswrapper[4948]: I0120 20:07:38.167874 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.167851813 podStartE2EDuration="4.167851813s" podCreationTimestamp="2026-01-20 20:07:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:38.166128415 +0000 UTC m=+1086.116853394" watchObservedRunningTime="2026-01-20 20:07:38.167851813 +0000 UTC m=+1086.118576782" Jan 20 20:07:38 crc kubenswrapper[4948]: I0120 20:07:38.240577 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": read tcp 10.217.0.2:47500->10.217.0.161:9311: read: connection reset by peer" Jan 20 20:07:38 crc kubenswrapper[4948]: I0120 20:07:38.240681 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-76b984f6db-smbhz" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.161:9311/healthcheck\": read tcp 10.217.0.2:47504->10.217.0.161:9311: read: connection reset by peer" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.158550 4948 generic.go:334] "Generic (PLEG): container finished" podID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerID="03f395f9b6d04ecdcec62bc88225d7c31cdae6ddb3b4a206a8dadf5906f944ee" exitCode=0 Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.160387 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76b984f6db-smbhz" event={"ID":"81ccff20-6613-42e9-a2fb-22a520b8b4cf","Type":"ContainerDied","Data":"03f395f9b6d04ecdcec62bc88225d7c31cdae6ddb3b4a206a8dadf5906f944ee"} Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.345283 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.401201 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-combined-ca-bundle\") pod \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.401255 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data\") pod \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.504852 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data-custom\") pod \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.505473 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcrr8\" (UniqueName: \"kubernetes.io/projected/81ccff20-6613-42e9-a2fb-22a520b8b4cf-kube-api-access-zcrr8\") pod \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.505619 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/81ccff20-6613-42e9-a2fb-22a520b8b4cf-logs\") pod \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\" (UID: \"81ccff20-6613-42e9-a2fb-22a520b8b4cf\") " Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.515195 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81ccff20-6613-42e9-a2fb-22a520b8b4cf-logs" (OuterVolumeSpecName: "logs") pod "81ccff20-6613-42e9-a2fb-22a520b8b4cf" (UID: "81ccff20-6613-42e9-a2fb-22a520b8b4cf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.516184 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "81ccff20-6613-42e9-a2fb-22a520b8b4cf" (UID: "81ccff20-6613-42e9-a2fb-22a520b8b4cf"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.521653 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81ccff20-6613-42e9-a2fb-22a520b8b4cf-kube-api-access-zcrr8" (OuterVolumeSpecName: "kube-api-access-zcrr8") pod "81ccff20-6613-42e9-a2fb-22a520b8b4cf" (UID: "81ccff20-6613-42e9-a2fb-22a520b8b4cf"). InnerVolumeSpecName "kube-api-access-zcrr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.521996 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "81ccff20-6613-42e9-a2fb-22a520b8b4cf" (UID: "81ccff20-6613-42e9-a2fb-22a520b8b4cf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.596460 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data" (OuterVolumeSpecName: "config-data") pod "81ccff20-6613-42e9-a2fb-22a520b8b4cf" (UID: "81ccff20-6613-42e9-a2fb-22a520b8b4cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.608387 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcrr8\" (UniqueName: \"kubernetes.io/projected/81ccff20-6613-42e9-a2fb-22a520b8b4cf-kube-api-access-zcrr8\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.608420 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/81ccff20-6613-42e9-a2fb-22a520b8b4cf-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.608434 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.608442 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.608451 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81ccff20-6613-42e9-a2fb-22a520b8b4cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:39 crc kubenswrapper[4948]: I0120 20:07:39.683831 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.183883 4948 generic.go:334] "Generic (PLEG): container finished" podID="4d2c0905-915e-4504-8454-ee3500220ab3" containerID="08d9c3660e3ecd0832afba6cf5911a8e8427e7bed01955d0e134ac074a19a3f1" exitCode=137 Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.187978 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dd67cb9b-9w4wk" event={"ID":"4d2c0905-915e-4504-8454-ee3500220ab3","Type":"ContainerDied","Data":"08d9c3660e3ecd0832afba6cf5911a8e8427e7bed01955d0e134ac074a19a3f1"} Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.188068 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dd67cb9b-9w4wk" event={"ID":"4d2c0905-915e-4504-8454-ee3500220ab3","Type":"ContainerStarted","Data":"3a23ab38989e7c7f201254011c0807c65fcca348eb7fda45253cf536df81d13d"} Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.216151 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-76b984f6db-smbhz" event={"ID":"81ccff20-6613-42e9-a2fb-22a520b8b4cf","Type":"ContainerDied","Data":"ef4bd13744820cdaf4d2ae9e6074eb557b0d849f4b7e6164a7376d20a7bab8d3"} Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.216210 4948 scope.go:117] "RemoveContainer" containerID="03f395f9b6d04ecdcec62bc88225d7c31cdae6ddb3b4a206a8dadf5906f944ee" Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.216370 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-76b984f6db-smbhz" Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.298194 4948 generic.go:334] "Generic (PLEG): container finished" podID="af522f17-3cad-4004-b112-51e47fa9fea7" containerID="3d0b58f79a4101a472c79a9066f937e017f54113f2910aa3d332331e863ecd0f" exitCode=137 Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.298845 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerDied","Data":"3d0b58f79a4101a472c79a9066f937e017f54113f2910aa3d332331e863ecd0f"} Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.298913 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerStarted","Data":"f5337fdeea822defb3bda066c6a194da1d66af7fc4c86187fb510469631f72ad"} Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.382641 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-76b984f6db-smbhz"] Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.382953 4948 scope.go:117] "RemoveContainer" containerID="6f79b8772a40b0b359303838f32c21f3cf48f7121d6d990464ce31990f6f11f8" Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.394307 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-76b984f6db-smbhz"] Jan 20 20:07:40 crc kubenswrapper[4948]: I0120 20:07:40.586179 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" path="/var/lib/kubelet/pods/81ccff20-6613-42e9-a2fb-22a520b8b4cf/volumes" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.884975 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.885846 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="ceilometer-central-agent" containerID="cri-o://967941366e604b4d950bf3d9619707dd25f4eaaa548c6ced7375fadc22974fc6" gracePeriod=30 Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.886004 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="proxy-httpd" containerID="cri-o://c841b5fa069dfe5a6fb9a7bfd4a789f0ae4ffbaab7e5270f29a883038b3d172f" gracePeriod=30 Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.886052 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="sg-core" containerID="cri-o://56f79db8b2d0ba9877ee75f5fb6727f5e0c0c6d653fad44bf2b97a23f46d95c4" gracePeriod=30 Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.886091 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="ceilometer-notification-agent" containerID="cri-o://cf7ffd612025ead678392921343d34c52b2036b6245ddd684837d138126544f9" gracePeriod=30 Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.911653 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.166:3000/\": EOF" Jan 20 
20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.950206 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-646f4c575-wzbtn"] Jan 20 20:07:43 crc kubenswrapper[4948]: E0120 20:07:43.956434 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.956504 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api" Jan 20 20:07:43 crc kubenswrapper[4948]: E0120 20:07:43.956589 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.956598 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.957225 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api-log" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.957254 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccff20-6613-42e9-a2fb-22a520b8b4cf" containerName="barbican-api" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.958554 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.966351 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.966566 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.967190 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 20 20:07:43 crc kubenswrapper[4948]: I0120 20:07:43.973942 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-646f4c575-wzbtn"] Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.018540 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-config-data\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.018975 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0464310-34e8-4747-9a37-6a9ce764a73a-log-httpd\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.019057 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-internal-tls-certs\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.019141 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.019141 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-public-tls-certs\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.019170 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv5r8\" (UniqueName: \"kubernetes.io/projected/e0464310-34e8-4747-9a37-6a9ce764a73a-kube-api-access-mv5r8\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.019255 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e0464310-34e8-4747-9a37-6a9ce764a73a-etc-swift\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.019308 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0464310-34e8-4747-9a37-6a9ce764a73a-run-httpd\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.019361 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-combined-ca-bundle\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.120941 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0464310-34e8-4747-9a37-6a9ce764a73a-run-httpd\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.122010 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-combined-ca-bundle\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.122181 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-config-data\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.122352 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0464310-34e8-4747-9a37-6a9ce764a73a-log-httpd\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.122510 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-internal-tls-certs\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.122694 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-public-tls-certs\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.123005 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mv5r8\" (UniqueName: \"kubernetes.io/projected/e0464310-34e8-4747-9a37-6a9ce764a73a-kube-api-access-mv5r8\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.123170 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e0464310-34e8-4747-9a37-6a9ce764a73a-etc-swift\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.122195 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0464310-34e8-4747-9a37-6a9ce764a73a-run-httpd\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.126296 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e0464310-34e8-4747-9a37-6a9ce764a73a-log-httpd\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.130667 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-combined-ca-bundle\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.131975 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/e0464310-34e8-4747-9a37-6a9ce764a73a-etc-swift\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.135629 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-config-data\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.140060 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-public-tls-certs\") pod 
\"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.141456 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e0464310-34e8-4747-9a37-6a9ce764a73a-internal-tls-certs\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.164676 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv5r8\" (UniqueName: \"kubernetes.io/projected/e0464310-34e8-4747-9a37-6a9ce764a73a-kube-api-access-mv5r8\") pod \"swift-proxy-646f4c575-wzbtn\" (UID: \"e0464310-34e8-4747-9a37-6a9ce764a73a\") " pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.289278 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.350000 4948 generic.go:334] "Generic (PLEG): container finished" podID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerID="c841b5fa069dfe5a6fb9a7bfd4a789f0ae4ffbaab7e5270f29a883038b3d172f" exitCode=0 Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.350216 4948 generic.go:334] "Generic (PLEG): container finished" podID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerID="56f79db8b2d0ba9877ee75f5fb6727f5e0c0c6d653fad44bf2b97a23f46d95c4" exitCode=2 Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.350089 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerDied","Data":"c841b5fa069dfe5a6fb9a7bfd4a789f0ae4ffbaab7e5270f29a883038b3d172f"} Jan 20 20:07:44 crc kubenswrapper[4948]: I0120 20:07:44.350393 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerDied","Data":"56f79db8b2d0ba9877ee75f5fb6727f5e0c0c6d653fad44bf2b97a23f46d95c4"} Jan 20 20:07:45 crc kubenswrapper[4948]: I0120 20:07:45.167182 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 20 20:07:45 crc kubenswrapper[4948]: I0120 20:07:45.388214 4948 generic.go:334] "Generic (PLEG): container finished" podID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerID="967941366e604b4d950bf3d9619707dd25f4eaaa548c6ced7375fadc22974fc6" exitCode=0 Jan 20 20:07:45 crc kubenswrapper[4948]: I0120 20:07:45.388257 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerDied","Data":"967941366e604b4d950bf3d9619707dd25f4eaaa548c6ced7375fadc22974fc6"} Jan 20 20:07:48 crc kubenswrapper[4948]: I0120 20:07:48.418259 4948 generic.go:334] "Generic (PLEG): container finished" podID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerID="cf7ffd612025ead678392921343d34c52b2036b6245ddd684837d138126544f9" exitCode=0 Jan 20 20:07:48 crc kubenswrapper[4948]: I0120 20:07:48.418326 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerDied","Data":"cf7ffd612025ead678392921343d34c52b2036b6245ddd684837d138126544f9"} Jan 20 20:07:48 crc kubenswrapper[4948]: I0120 20:07:48.696652 4948 prober.go:107] 
"Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.166:3000/\": dial tcp 10.217.0.166:3000: connect: connection refused" Jan 20 20:07:49 crc kubenswrapper[4948]: I0120 20:07:49.392914 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:07:49 crc kubenswrapper[4948]: I0120 20:07:49.393266 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:07:49 crc kubenswrapper[4948]: I0120 20:07:49.394863 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 20 20:07:49 crc kubenswrapper[4948]: I0120 20:07:49.540659 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:07:49 crc kubenswrapper[4948]: I0120 20:07:49.540767 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:07:49 crc kubenswrapper[4948]: I0120 20:07:49.541783 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Jan 20 20:07:49 crc kubenswrapper[4948]: I0120 20:07:49.977862 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.0.164:8776/healthcheck\": dial tcp 10.217.0.164:8776: connect: connection refused" Jan 20 20:07:50 crc kubenswrapper[4948]: I0120 20:07:50.249993 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:07:50 crc kubenswrapper[4948]: I0120 20:07:50.250049 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:07:50 crc kubenswrapper[4948]: I0120 20:07:50.565774 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:07:50 crc kubenswrapper[4948]: I0120 20:07:50.566025 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerName="glance-log" containerID="cri-o://d489e8dd56e6b521defd6b93328af99da8729aaeae03d32ebde333ba8c9321de" gracePeriod=30 Jan 20 20:07:50 crc kubenswrapper[4948]: I0120 20:07:50.566440 4948 kuberuntime_container.go:808] "Killing container with a grace period" 
Jan 20 20:07:50 crc kubenswrapper[4948]: I0120 20:07:50.566440 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerName="glance-httpd" containerID="cri-o://fec5eb47d6b163bbd97d2f2d7a7df78179f0617b26e8b1e9c9d3feace7af8042" gracePeriod=30 Jan 20 20:07:51 crc kubenswrapper[4948]: I0120 20:07:51.464417 4948 generic.go:334] "Generic (PLEG): container finished" podID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerID="d489e8dd56e6b521defd6b93328af99da8729aaeae03d32ebde333ba8c9321de" exitCode=143 Jan 20 20:07:51 crc kubenswrapper[4948]: I0120 20:07:51.464518 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2b8bd9a7-9ee4-4597-ac4e-83691d688db5","Type":"ContainerDied","Data":"d489e8dd56e6b521defd6b93328af99da8729aaeae03d32ebde333ba8c9321de"} Jan 20 20:07:51 crc kubenswrapper[4948]: I0120 20:07:51.470219 4948 generic.go:334] "Generic (PLEG): container finished" podID="5f93da57-3189-424f-952f-7731884075f8" containerID="bd0057d43e437d4afecf99dbbfc5f55d1385b8784e2201192d21bf290177e9e0" exitCode=137 Jan 20 20:07:51 crc kubenswrapper[4948]: I0120 20:07:51.470269 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f93da57-3189-424f-952f-7731884075f8","Type":"ContainerDied","Data":"bd0057d43e437d4afecf99dbbfc5f55d1385b8784e2201192d21bf290177e9e0"} Jan 20 20:07:51 crc kubenswrapper[4948]: E0120 20:07:51.583000 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified"
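The PullImage failure above ("context canceled" while copying the image config) is surfaced on the pod as ErrImagePull in the "Unhandled Error" dump that follows, and after repeated failures kubelet backs off, which is the ImagePullBackOff recorded further down at 20:07:52.515194. The dumped container spec already shows the relevant fields; a minimal sketch of just that corner of the spec, with all values taken from the dump:

package sketch

import corev1 "k8s.io/api/core/v1"

// openstackclientContainer mirrors the image-pull-relevant fragment of the
// container spec printed in the UnhandledError entry below.
func openstackclientContainer() corev1.Container {
    return corev1.Container{
        Name:    "openstackclient",
        Image:   "quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified",
        Command: []string{"/bin/sleep"},
        Args:    []string{"infinity"},
        // IfNotPresent still pulls when the image is absent locally, so a
        // registry error blocks the container start, as seen here.
        ImagePullPolicy: corev1.PullIfNotPresent,
    }
}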
Jan 20 20:07:51 crc kubenswrapper[4948]: E0120 20:07:51.583226 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n64fh656hc7hc4h654h5d5h565hcdh67fh58bh67ch647h5bh6fh598h655h99hc6h589h588h68fh664h5f6h5f6hb9h576h667h86h699h5bdh589h5d8q,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ftksk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstackclient_openstack(d1222f27-af2a-46fd-a296-37bdb8db4486): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:07:51 crc kubenswrapper[4948]: E0120 20:07:51.584531 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="d1222f27-af2a-46fd-a296-37bdb8db4486" Jan 20 20:07:51 crc kubenswrapper[4948]: I0120 20:07:51.664144 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:07:51 crc kubenswrapper[4948]: I0120 20:07:51.666623 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" containerName="glance-log" containerID="cri-o://634c2dafb4145d1d96a9a997c1c934c0ea1e2c777db8aa62bfdd7bea6edb028a" gracePeriod=30 Jan 20 20:07:51 crc kubenswrapper[4948]: I0120 20:07:51.666722 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" containerName="glance-httpd" containerID="cri-o://d478d71e2be882fad485d78cde03700f868017416f23b39fe9e63427faa63cde" gracePeriod=30
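Both glance pods are being deleted here with gracePeriod=30: kubelet first sends SIGTERM and only escalates to SIGKILL once the grace period expires, which is why the glance-log container exits with 143 (SIGTERM) in the surrounding entries while cinder-api earlier showed 137 (SIGKILL). A hedged client-go sketch of a deletion that sets this grace period explicitly (clientset construction omitted; namespace and pod name are placeholders):

package sketch

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// deleteWithGrace deletes a pod with a 30-second grace period, matching the
// gracePeriod=30 recorded by kuberuntime_container.go above.
func deleteWithGrace(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
    grace := int64(30)
    return cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{
        GracePeriodSeconds: &grace,
    })
}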
Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.202850 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.288788 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data-custom\") pod \"5f93da57-3189-424f-952f-7731884075f8\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.289148 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dt492\" (UniqueName: \"kubernetes.io/projected/5f93da57-3189-424f-952f-7731884075f8-kube-api-access-dt492\") pod \"5f93da57-3189-424f-952f-7731884075f8\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.289213 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-scripts\") pod \"5f93da57-3189-424f-952f-7731884075f8\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.289326 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f93da57-3189-424f-952f-7731884075f8-logs\") pod \"5f93da57-3189-424f-952f-7731884075f8\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.289436 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f93da57-3189-424f-952f-7731884075f8-etc-machine-id\") pod \"5f93da57-3189-424f-952f-7731884075f8\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.289476 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data\") pod \"5f93da57-3189-424f-952f-7731884075f8\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.289520 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-combined-ca-bundle\") pod \"5f93da57-3189-424f-952f-7731884075f8\" (UID: \"5f93da57-3189-424f-952f-7731884075f8\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.290536 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5f93da57-3189-424f-952f-7731884075f8-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5f93da57-3189-424f-952f-7731884075f8" (UID: "5f93da57-3189-424f-952f-7731884075f8"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.294991 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f93da57-3189-424f-952f-7731884075f8-logs" (OuterVolumeSpecName: "logs") pod "5f93da57-3189-424f-952f-7731884075f8" (UID: "5f93da57-3189-424f-952f-7731884075f8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
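The UnmountVolume/TearDown run that starts here, continues through ceilometer-0's volumes below, and ends in the "Volume detached" records is the volume manager reconciling actual state against desired state: volumes of pods that have left the desired world are unmounted before the replacement pods' volumes are mounted. A schematic of that loop, offered purely as an illustration (this is not kubelet's actual reconciler):

package sketch

// volumeReconciler is a toy model of the desired-world/actual-world split
// visible in the reconciler_common.go entries above and below.
type volumeReconciler struct {
    desired map[string][]string // pod UID -> volumes that should be mounted
    actual  map[string][]string // pod UID -> volumes currently mounted
}

func (r *volumeReconciler) reconcile(unmount, mount func(podUID, volume string)) {
    // Unmount first: anything mounted for a pod that no longer exists in
    // the desired world (the UnmountVolume/TearDown entries).
    for pod, vols := range r.actual {
        if _, ok := r.desired[pod]; !ok {
            for _, v := range vols {
                unmount(pod, v)
            }
        }
    }
    // Then mount whatever desired pods still lack (the MountVolume.SetUp
    // entries for the replacement pods further down).
    for pod, vols := range r.desired {
        mounted := map[string]bool{}
        for _, v := range r.actual[pod] {
            mounted[v] = true
        }
        for _, v := range vols {
            if !mounted[v] {
                mount(pod, v)
            }
        }
    }
}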
Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.312639 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.327929 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-scripts" (OuterVolumeSpecName: "scripts") pod "5f93da57-3189-424f-952f-7731884075f8" (UID: "5f93da57-3189-424f-952f-7731884075f8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.329076 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f93da57-3189-424f-952f-7731884075f8-kube-api-access-dt492" (OuterVolumeSpecName: "kube-api-access-dt492") pod "5f93da57-3189-424f-952f-7731884075f8" (UID: "5f93da57-3189-424f-952f-7731884075f8"). InnerVolumeSpecName "kube-api-access-dt492". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.342854 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5f93da57-3189-424f-952f-7731884075f8" (UID: "5f93da57-3189-424f-952f-7731884075f8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.370736 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5f93da57-3189-424f-952f-7731884075f8" (UID: "5f93da57-3189-424f-952f-7731884075f8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391140 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-config-data\") pod \"d51108ae-667c-4f4f-9f7b-99c96c573cca\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391245 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tc9wf\" (UniqueName: \"kubernetes.io/projected/d51108ae-667c-4f4f-9f7b-99c96c573cca-kube-api-access-tc9wf\") pod \"d51108ae-667c-4f4f-9f7b-99c96c573cca\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391331 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-run-httpd\") pod \"d51108ae-667c-4f4f-9f7b-99c96c573cca\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391358 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-combined-ca-bundle\") pod \"d51108ae-667c-4f4f-9f7b-99c96c573cca\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391430 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-scripts\") pod \"d51108ae-667c-4f4f-9f7b-99c96c573cca\" (UID: 
\"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391503 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-sg-core-conf-yaml\") pod \"d51108ae-667c-4f4f-9f7b-99c96c573cca\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391555 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-log-httpd\") pod \"d51108ae-667c-4f4f-9f7b-99c96c573cca\" (UID: \"d51108ae-667c-4f4f-9f7b-99c96c573cca\") " Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391935 4948 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f93da57-3189-424f-952f-7731884075f8-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391951 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391959 4948 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391967 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dt492\" (UniqueName: \"kubernetes.io/projected/5f93da57-3189-424f-952f-7731884075f8-kube-api-access-dt492\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391978 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.391989 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f93da57-3189-424f-952f-7731884075f8-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.392905 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d51108ae-667c-4f4f-9f7b-99c96c573cca" (UID: "d51108ae-667c-4f4f-9f7b-99c96c573cca"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.398143 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d51108ae-667c-4f4f-9f7b-99c96c573cca" (UID: "d51108ae-667c-4f4f-9f7b-99c96c573cca"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.404742 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data" (OuterVolumeSpecName: "config-data") pod "5f93da57-3189-424f-952f-7731884075f8" (UID: "5f93da57-3189-424f-952f-7731884075f8"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.408895 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-scripts" (OuterVolumeSpecName: "scripts") pod "d51108ae-667c-4f4f-9f7b-99c96c573cca" (UID: "d51108ae-667c-4f4f-9f7b-99c96c573cca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.411291 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d51108ae-667c-4f4f-9f7b-99c96c573cca-kube-api-access-tc9wf" (OuterVolumeSpecName: "kube-api-access-tc9wf") pod "d51108ae-667c-4f4f-9f7b-99c96c573cca" (UID: "d51108ae-667c-4f4f-9f7b-99c96c573cca"). InnerVolumeSpecName "kube-api-access-tc9wf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.454958 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d51108ae-667c-4f4f-9f7b-99c96c573cca" (UID: "d51108ae-667c-4f4f-9f7b-99c96c573cca"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.490296 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d51108ae-667c-4f4f-9f7b-99c96c573cca","Type":"ContainerDied","Data":"0685e146920bdd9e668bce3a6d342ffe128b5919d37a71570f5ed34c25ee9695"} Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.490347 4948 scope.go:117] "RemoveContainer" containerID="c841b5fa069dfe5a6fb9a7bfd4a789f0ae4ffbaab7e5270f29a883038b3d172f" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.490482 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.494447 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tc9wf\" (UniqueName: \"kubernetes.io/projected/d51108ae-667c-4f4f-9f7b-99c96c573cca-kube-api-access-tc9wf\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.494475 4948 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.494488 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.494500 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f93da57-3189-424f-952f-7731884075f8-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.494513 4948 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.494524 4948 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d51108ae-667c-4f4f-9f7b-99c96c573cca-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.503688 4948 generic.go:334] "Generic (PLEG): container finished" podID="249e6833-425e-4243-b1ca-6c1b78a752de" containerID="634c2dafb4145d1d96a9a997c1c934c0ea1e2c777db8aa62bfdd7bea6edb028a" exitCode=143 Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.503777 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"249e6833-425e-4243-b1ca-6c1b78a752de","Type":"ContainerDied","Data":"634c2dafb4145d1d96a9a997c1c934c0ea1e2c777db8aa62bfdd7bea6edb028a"} Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.508604 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.510159 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"5f93da57-3189-424f-952f-7731884075f8","Type":"ContainerDied","Data":"ad1c8c77529fe0fe17a1db2b1fee753e1cb7884e58531c9dda96fc4bbb08ffb3"} Jan 20 20:07:52 crc kubenswrapper[4948]: E0120 20:07:52.515194 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="d1222f27-af2a-46fd-a296-37bdb8db4486" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.541920 4948 scope.go:117] "RemoveContainer" containerID="56f79db8b2d0ba9877ee75f5fb6727f5e0c0c6d653fad44bf2b97a23f46d95c4" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.555874 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d51108ae-667c-4f4f-9f7b-99c96c573cca" (UID: "d51108ae-667c-4f4f-9f7b-99c96c573cca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.611524 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.707934 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-config-data" (OuterVolumeSpecName: "config-data") pod "d51108ae-667c-4f4f-9f7b-99c96c573cca" (UID: "d51108ae-667c-4f4f-9f7b-99c96c573cca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.713026 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d51108ae-667c-4f4f-9f7b-99c96c573cca-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.779314 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-646f4c575-wzbtn"] Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.820884 4948 scope.go:117] "RemoveContainer" containerID="cf7ffd612025ead678392921343d34c52b2036b6245ddd684837d138126544f9" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.832627 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.865602 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.889784 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.894403 4948 scope.go:117] "RemoveContainer" containerID="967941366e604b4d950bf3d9619707dd25f4eaaa548c6ced7375fadc22974fc6" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.932838 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 20 20:07:52 crc kubenswrapper[4948]: E0120 20:07:52.933256 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933273 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api" Jan 20 20:07:52 crc kubenswrapper[4948]: E0120 20:07:52.933289 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="ceilometer-notification-agent" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933296 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="ceilometer-notification-agent" Jan 20 20:07:52 crc kubenswrapper[4948]: E0120 20:07:52.933309 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="sg-core" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933316 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="sg-core" Jan 20 20:07:52 crc kubenswrapper[4948]: E0120 20:07:52.933333 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api-log" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933339 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api-log" Jan 20 20:07:52 crc kubenswrapper[4948]: E0120 20:07:52.933348 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="ceilometer-central-agent" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933353 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="ceilometer-central-agent" Jan 20 20:07:52 crc kubenswrapper[4948]: E0120 20:07:52.933363 4948 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="proxy-httpd" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933371 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="proxy-httpd" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933569 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="ceilometer-central-agent" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933586 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="proxy-httpd" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933595 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="ceilometer-notification-agent" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933605 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933619 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f93da57-3189-424f-952f-7731884075f8" containerName="cinder-api-log" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.933629 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" containerName="sg-core" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.934655 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.940102 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.940690 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.940765 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.952895 4948 scope.go:117] "RemoveContainer" containerID="bd0057d43e437d4afecf99dbbfc5f55d1385b8784e2201192d21bf290177e9e0" Jan 20 20:07:52 crc kubenswrapper[4948]: I0120 20:07:52.979229 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.006624 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.013794 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
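In the churn above, cinder-api-0 and ceilometer-0 come back under the same names but with fresh UIDs (5f93da57... becomes bf15b74a..., d51108ae... becomes da31cdb9...), which is why the CPU and memory managers log RemoveStaleState for the old containers: pod identity is (namespace, name, UID), not just the name. Kubelet consumes this churn as the ADD/UPDATE/DELETE/REMOVE SyncLoop events seen here; a minimal client-go informer sketch that observes the same event stream (illustrative wiring, not kubelet's):

package sketch

import (
    "fmt"
    "time"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
)

// watchPods prints ADD/DELETE events for pods in one namespace; a pod that
// is deleted and recreated under the same name shows up with a new UID,
// exactly the pattern that triggers RemoveStaleState above.
func watchPods(cs kubernetes.Interface, ns string, stop <-chan struct{}) {
    factory := informers.NewSharedInformerFactoryWithOptions(cs, 30*time.Second,
        informers.WithNamespace(ns))
    factory.Core().V1().Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            p := obj.(*corev1.Pod)
            fmt.Println("ADD", p.Name, p.UID)
        },
        DeleteFunc: func(obj interface{}) {
            if p, ok := obj.(*corev1.Pod); ok {
                fmt.Println("DELETE", p.Name, p.UID)
            }
        },
    })
    factory.Start(stop)
    factory.WaitForCacheSync(stop)
}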
Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.015861 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021391 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021440 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txk7r\" (UniqueName: \"kubernetes.io/projected/bf15b74a-2849-4970-87a3-83d7e1b788ba-kube-api-access-txk7r\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021511 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-config-data\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021565 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf15b74a-2849-4970-87a3-83d7e1b788ba-logs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021606 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021620 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021645 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-config-data-custom\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021675 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-scripts\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.021693 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bf15b74a-2849-4970-87a3-83d7e1b788ba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.028081 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ceilometer-0"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.029768 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.033009 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.042021 4948 scope.go:117] "RemoveContainer" containerID="d66f639b5e1eaf715bbec8f3da02dc2437de7bf931f7a254d8fe5fd07294c985" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.134113 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-config-data\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.134193 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.134218 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf15b74a-2849-4970-87a3-83d7e1b788ba-logs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.134433 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.134543 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.134953 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bf15b74a-2849-4970-87a3-83d7e1b788ba-logs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.135228 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.135330 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-config-data-custom\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.135406 4948 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-scripts\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.135481 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-scripts\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.136356 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bf15b74a-2849-4970-87a3-83d7e1b788ba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.137256 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bf15b74a-2849-4970-87a3-83d7e1b788ba-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.139228 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-config-data-custom\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.140033 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-log-httpd\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.140245 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.140273 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txk7r\" (UniqueName: \"kubernetes.io/projected/bf15b74a-2849-4970-87a3-83d7e1b788ba-kube-api-access-txk7r\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.140331 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgtbt\" (UniqueName: \"kubernetes.io/projected/da31cdb9-d009-48a3-92f0-5e0102d0096a-kube-api-access-hgtbt\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.140414 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-run-httpd\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc 
kubenswrapper[4948]: I0120 20:07:53.140503 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-config-data\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.141302 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-scripts\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.142397 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.145152 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.146419 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-config-data\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.146881 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bf15b74a-2849-4970-87a3-83d7e1b788ba-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.158149 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txk7r\" (UniqueName: \"kubernetes.io/projected/bf15b74a-2849-4970-87a3-83d7e1b788ba-kube-api-access-txk7r\") pod \"cinder-api-0\" (UID: \"bf15b74a-2849-4970-87a3-83d7e1b788ba\") " pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.242729 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-scripts\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.242793 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-log-httpd\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.242851 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgtbt\" (UniqueName: \"kubernetes.io/projected/da31cdb9-d009-48a3-92f0-5e0102d0096a-kube-api-access-hgtbt\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.242878 4948 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-run-httpd\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.242929 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-config-data\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.242956 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.242980 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.244084 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-log-httpd\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.244093 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-run-httpd\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.247263 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.247737 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-config-data\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.249428 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-scripts\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.250787 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.264333 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hgtbt\" (UniqueName: \"kubernetes.io/projected/da31cdb9-d009-48a3-92f0-5e0102d0096a-kube-api-access-hgtbt\") pod \"ceilometer-0\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.293621 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.364420 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.479253 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-pzp8p"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.481360 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.539977 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-pzp8p"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.561245 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69739aba-0e18-493d-9957-8b215b4a2eef-operator-scripts\") pod \"nova-api-db-create-pzp8p\" (UID: \"69739aba-0e18-493d-9957-8b215b4a2eef\") " pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.561566 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtrrl\" (UniqueName: \"kubernetes.io/projected/69739aba-0e18-493d-9957-8b215b4a2eef-kube-api-access-xtrrl\") pod \"nova-api-db-create-pzp8p\" (UID: \"69739aba-0e18-493d-9957-8b215b4a2eef\") " pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.594240 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-646f4c575-wzbtn" event={"ID":"e0464310-34e8-4747-9a37-6a9ce764a73a","Type":"ContainerStarted","Data":"38eb2baa1c1492f08fcd51f5df9933dcc9b88d992a52bd34389ff7e038559a22"} Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.594298 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-646f4c575-wzbtn" event={"ID":"e0464310-34e8-4747-9a37-6a9ce764a73a","Type":"ContainerStarted","Data":"ba3ebd173022e692305576edee8d5b6ad5542f76d0fc5f085f6cf0485efaaa9e"} Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.608767 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-qlvzm"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.610402 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.637890 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-qlvzm"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.668339 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tntk6\" (UniqueName: \"kubernetes.io/projected/f66c168c-985d-43b6-a53d-5613b7a416cc-kube-api-access-tntk6\") pod \"nova-cell0-db-create-qlvzm\" (UID: \"f66c168c-985d-43b6-a53d-5613b7a416cc\") " pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.668442 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69739aba-0e18-493d-9957-8b215b4a2eef-operator-scripts\") pod \"nova-api-db-create-pzp8p\" (UID: \"69739aba-0e18-493d-9957-8b215b4a2eef\") " pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.668478 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f66c168c-985d-43b6-a53d-5613b7a416cc-operator-scripts\") pod \"nova-cell0-db-create-qlvzm\" (UID: \"f66c168c-985d-43b6-a53d-5613b7a416cc\") " pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.668602 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtrrl\" (UniqueName: \"kubernetes.io/projected/69739aba-0e18-493d-9957-8b215b4a2eef-kube-api-access-xtrrl\") pod \"nova-api-db-create-pzp8p\" (UID: \"69739aba-0e18-493d-9957-8b215b4a2eef\") " pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.735135 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69739aba-0e18-493d-9957-8b215b4a2eef-operator-scripts\") pod \"nova-api-db-create-pzp8p\" (UID: \"69739aba-0e18-493d-9957-8b215b4a2eef\") " pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.772006 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tntk6\" (UniqueName: \"kubernetes.io/projected/f66c168c-985d-43b6-a53d-5613b7a416cc-kube-api-access-tntk6\") pod \"nova-cell0-db-create-qlvzm\" (UID: \"f66c168c-985d-43b6-a53d-5613b7a416cc\") " pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.772099 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f66c168c-985d-43b6-a53d-5613b7a416cc-operator-scripts\") pod \"nova-cell0-db-create-qlvzm\" (UID: \"f66c168c-985d-43b6-a53d-5613b7a416cc\") " pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.776302 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f66c168c-985d-43b6-a53d-5613b7a416cc-operator-scripts\") pod \"nova-cell0-db-create-qlvzm\" (UID: \"f66c168c-985d-43b6-a53d-5613b7a416cc\") " pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.802764 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtrrl\" 
(UniqueName: \"kubernetes.io/projected/69739aba-0e18-493d-9957-8b215b4a2eef-kube-api-access-xtrrl\") pod \"nova-api-db-create-pzp8p\" (UID: \"69739aba-0e18-493d-9957-8b215b4a2eef\") " pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.805198 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tntk6\" (UniqueName: \"kubernetes.io/projected/f66c168c-985d-43b6-a53d-5613b7a416cc-kube-api-access-tntk6\") pod \"nova-cell0-db-create-qlvzm\" (UID: \"f66c168c-985d-43b6-a53d-5613b7a416cc\") " pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.828775 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.904528 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7ec1-account-create-update-269qf"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.914922 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.921286 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.928958 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7ec1-account-create-update-269qf"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.960069 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-r724g"] Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.961326 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.986613 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd73c9ec-8283-44a3-8a72-2fc52180b2df-operator-scripts\") pod \"nova-api-7ec1-account-create-update-269qf\" (UID: \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\") " pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.986716 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5l8r\" (UniqueName: \"kubernetes.io/projected/bd73c9ec-8283-44a3-8a72-2fc52180b2df-kube-api-access-m5l8r\") pod \"nova-api-7ec1-account-create-update-269qf\" (UID: \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\") " pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:07:53 crc kubenswrapper[4948]: I0120 20:07:53.988401 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-r724g"] Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.012902 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-101b-account-create-update-b8krk"] Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.014338 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.019280 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.075812 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-101b-account-create-update-b8krk"] Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.082439 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.087660 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdt77\" (UniqueName: \"kubernetes.io/projected/2c5d2212-ff64-4cb5-964a-0fa269bb0249-kube-api-access-gdt77\") pod \"nova-cell1-db-create-r724g\" (UID: \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\") " pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.087913 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d91976f-4b13-453d-8ee1-9614f4d23edc-operator-scripts\") pod \"nova-cell0-101b-account-create-update-b8krk\" (UID: \"4d91976f-4b13-453d-8ee1-9614f4d23edc\") " pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.088023 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd73c9ec-8283-44a3-8a72-2fc52180b2df-operator-scripts\") pod \"nova-api-7ec1-account-create-update-269qf\" (UID: \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\") " pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.088186 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5l8r\" (UniqueName: \"kubernetes.io/projected/bd73c9ec-8283-44a3-8a72-2fc52180b2df-kube-api-access-m5l8r\") pod \"nova-api-7ec1-account-create-update-269qf\" (UID: \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\") " pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.088310 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twwns\" (UniqueName: \"kubernetes.io/projected/4d91976f-4b13-453d-8ee1-9614f4d23edc-kube-api-access-twwns\") pod \"nova-cell0-101b-account-create-update-b8krk\" (UID: \"4d91976f-4b13-453d-8ee1-9614f4d23edc\") " pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.088412 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c5d2212-ff64-4cb5-964a-0fa269bb0249-operator-scripts\") pod \"nova-cell1-db-create-r724g\" (UID: \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\") " pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.089336 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd73c9ec-8283-44a3-8a72-2fc52180b2df-operator-scripts\") pod \"nova-api-7ec1-account-create-update-269qf\" (UID: \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\") " 
pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.112511 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.125235 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5l8r\" (UniqueName: \"kubernetes.io/projected/bd73c9ec-8283-44a3-8a72-2fc52180b2df-kube-api-access-m5l8r\") pod \"nova-api-7ec1-account-create-update-269qf\" (UID: \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\") " pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.154277 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.169428 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-28d2-account-create-update-qsqf8"] Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.171262 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:07:54 crc kubenswrapper[4948]: W0120 20:07:54.175873 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf15b74a_2849_4970_87a3_83d7e1b788ba.slice/crio-ed21dc4b3fde8a1aaedcc6b36d06673dc00b8c7baafed6a4997f1d74ba593a19 WatchSource:0}: Error finding container ed21dc4b3fde8a1aaedcc6b36d06673dc00b8c7baafed6a4997f1d74ba593a19: Status 404 returned error can't find the container with id ed21dc4b3fde8a1aaedcc6b36d06673dc00b8c7baafed6a4997f1d74ba593a19 Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.176089 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.200649 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twwns\" (UniqueName: \"kubernetes.io/projected/4d91976f-4b13-453d-8ee1-9614f4d23edc-kube-api-access-twwns\") pod \"nova-cell0-101b-account-create-update-b8krk\" (UID: \"4d91976f-4b13-453d-8ee1-9614f4d23edc\") " pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.200725 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c5d2212-ff64-4cb5-964a-0fa269bb0249-operator-scripts\") pod \"nova-cell1-db-create-r724g\" (UID: \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\") " pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.202863 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c5d2212-ff64-4cb5-964a-0fa269bb0249-operator-scripts\") pod \"nova-cell1-db-create-r724g\" (UID: \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\") " pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.204254 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.207333 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdt77\" (UniqueName: \"kubernetes.io/projected/2c5d2212-ff64-4cb5-964a-0fa269bb0249-kube-api-access-gdt77\") pod 
\"nova-cell1-db-create-r724g\" (UID: \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\") " pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.207485 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d91976f-4b13-453d-8ee1-9614f4d23edc-operator-scripts\") pod \"nova-cell0-101b-account-create-update-b8krk\" (UID: \"4d91976f-4b13-453d-8ee1-9614f4d23edc\") " pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.209896 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d91976f-4b13-453d-8ee1-9614f4d23edc-operator-scripts\") pod \"nova-cell0-101b-account-create-update-b8krk\" (UID: \"4d91976f-4b13-453d-8ee1-9614f4d23edc\") " pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.229307 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdt77\" (UniqueName: \"kubernetes.io/projected/2c5d2212-ff64-4cb5-964a-0fa269bb0249-kube-api-access-gdt77\") pod \"nova-cell1-db-create-r724g\" (UID: \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\") " pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.230910 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twwns\" (UniqueName: \"kubernetes.io/projected/4d91976f-4b13-453d-8ee1-9614f4d23edc-kube-api-access-twwns\") pod \"nova-cell0-101b-account-create-update-b8krk\" (UID: \"4d91976f-4b13-453d-8ee1-9614f4d23edc\") " pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.248870 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-28d2-account-create-update-qsqf8"] Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.259890 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.310501 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.311534 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51e4eded-1818-4696-a425-227ce9bb1750-operator-scripts\") pod \"nova-cell1-28d2-account-create-update-qsqf8\" (UID: \"51e4eded-1818-4696-a425-227ce9bb1750\") " pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.313873 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2xh2\" (UniqueName: \"kubernetes.io/projected/51e4eded-1818-4696-a425-227ce9bb1750-kube-api-access-g2xh2\") pod \"nova-cell1-28d2-account-create-update-qsqf8\" (UID: \"51e4eded-1818-4696-a425-227ce9bb1750\") " pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.415761 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2xh2\" (UniqueName: \"kubernetes.io/projected/51e4eded-1818-4696-a425-227ce9bb1750-kube-api-access-g2xh2\") pod \"nova-cell1-28d2-account-create-update-qsqf8\" (UID: \"51e4eded-1818-4696-a425-227ce9bb1750\") " pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.415904 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51e4eded-1818-4696-a425-227ce9bb1750-operator-scripts\") pod \"nova-cell1-28d2-account-create-update-qsqf8\" (UID: \"51e4eded-1818-4696-a425-227ce9bb1750\") " pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.416671 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51e4eded-1818-4696-a425-227ce9bb1750-operator-scripts\") pod \"nova-cell1-28d2-account-create-update-qsqf8\" (UID: \"51e4eded-1818-4696-a425-227ce9bb1750\") " pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.468211 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2xh2\" (UniqueName: \"kubernetes.io/projected/51e4eded-1818-4696-a425-227ce9bb1750-kube-api-access-g2xh2\") pod \"nova-cell1-28d2-account-create-update-qsqf8\" (UID: \"51e4eded-1818-4696-a425-227ce9bb1750\") " pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.502305 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:07:54 crc kubenswrapper[4948]: E0120 20:07:54.529767 4948 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b8bd9a7_9ee4_4597_ac4e_83691d688db5.slice/crio-conmon-fec5eb47d6b163bbd97d2f2d7a7df78179f0617b26e8b1e9c9d3feace7af8042.scope\": RecentStats: unable to find data in memory cache]" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.596607 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.654349 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f93da57-3189-424f-952f-7731884075f8" path="/var/lib/kubelet/pods/5f93da57-3189-424f-952f-7731884075f8/volumes" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.655183 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d51108ae-667c-4f4f-9f7b-99c96c573cca" path="/var/lib/kubelet/pods/d51108ae-667c-4f4f-9f7b-99c96c573cca/volumes" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.687503 4948 generic.go:334] "Generic (PLEG): container finished" podID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerID="fec5eb47d6b163bbd97d2f2d7a7df78179f0617b26e8b1e9c9d3feace7af8042" exitCode=0 Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.687561 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2b8bd9a7-9ee4-4597-ac4e-83691d688db5","Type":"ContainerDied","Data":"fec5eb47d6b163bbd97d2f2d7a7df78179f0617b26e8b1e9c9d3feace7af8042"} Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.700394 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-646f4c575-wzbtn" event={"ID":"e0464310-34e8-4747-9a37-6a9ce764a73a","Type":"ContainerStarted","Data":"e2766ffdee060c0fc45b0a6cfb7fb6c2ae42571b04a438a44d62834ccd316159"} Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.701549 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.701575 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.703169 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bf15b74a-2849-4970-87a3-83d7e1b788ba","Type":"ContainerStarted","Data":"ed21dc4b3fde8a1aaedcc6b36d06673dc00b8c7baafed6a4997f1d74ba593a19"} Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.724146 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerStarted","Data":"e3a75f21d53be0836036029a88478d5fac3c9d0aa06b01461a48dd3fcaa51725"} Jan 20 20:07:54 crc kubenswrapper[4948]: I0120 20:07:54.746274 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-646f4c575-wzbtn" podStartSLOduration=11.746255713 podStartE2EDuration="11.746255713s" podCreationTimestamp="2026-01-20 20:07:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:54.740059338 +0000 UTC m=+1102.690784307" watchObservedRunningTime="2026-01-20 20:07:54.746255713 +0000 UTC m=+1102.696980682" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.013386 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-pzp8p"] Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.341650 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-qlvzm"] Jan 20 20:07:55 crc kubenswrapper[4948]: W0120 20:07:55.375983 4948 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf66c168c_985d_43b6_a53d_5613b7a416cc.slice/crio-d8b4b1bb79b801b813fdd2bedeff3d9647c0a99b6ea949a2b47a7f056986c2f0 WatchSource:0}: Error finding container d8b4b1bb79b801b813fdd2bedeff3d9647c0a99b6ea949a2b47a7f056986c2f0: Status 404 returned error can't find the container with id d8b4b1bb79b801b813fdd2bedeff3d9647c0a99b6ea949a2b47a7f056986c2f0 Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.582410 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.674319 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-combined-ca-bundle\") pod \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.674396 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.674432 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-logs\") pod \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.674466 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-config-data\") pod \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.674516 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hb6d\" (UniqueName: \"kubernetes.io/projected/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-kube-api-access-6hb6d\") pod \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.674568 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-httpd-run\") pod \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.674604 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-scripts\") pod \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.674650 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-public-tls-certs\") pod \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\" (UID: \"2b8bd9a7-9ee4-4597-ac4e-83691d688db5\") " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.677137 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-logs" (OuterVolumeSpecName: "logs") pod "2b8bd9a7-9ee4-4597-ac4e-83691d688db5" (UID: "2b8bd9a7-9ee4-4597-ac4e-83691d688db5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.681795 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2b8bd9a7-9ee4-4597-ac4e-83691d688db5" (UID: "2b8bd9a7-9ee4-4597-ac4e-83691d688db5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.697754 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "2b8bd9a7-9ee4-4597-ac4e-83691d688db5" (UID: "2b8bd9a7-9ee4-4597-ac4e-83691d688db5"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.702993 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-kube-api-access-6hb6d" (OuterVolumeSpecName: "kube-api-access-6hb6d") pod "2b8bd9a7-9ee4-4597-ac4e-83691d688db5" (UID: "2b8bd9a7-9ee4-4597-ac4e-83691d688db5"). InnerVolumeSpecName "kube-api-access-6hb6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.721862 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-scripts" (OuterVolumeSpecName: "scripts") pod "2b8bd9a7-9ee4-4597-ac4e-83691d688db5" (UID: "2b8bd9a7-9ee4-4597-ac4e-83691d688db5"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.779341 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.779393 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.779403 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hb6d\" (UniqueName: \"kubernetes.io/projected/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-kube-api-access-6hb6d\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.779415 4948 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.779423 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.895457 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b8bd9a7-9ee4-4597-ac4e-83691d688db5" (UID: "2b8bd9a7-9ee4-4597-ac4e-83691d688db5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.900514 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2b8bd9a7-9ee4-4597-ac4e-83691d688db5","Type":"ContainerDied","Data":"dd2e1c482e1f85060d65d814dc7299e219496bd239b4749a7b94b2a365bc3aeb"} Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.903050 4948 scope.go:117] "RemoveContainer" containerID="fec5eb47d6b163bbd97d2f2d7a7df78179f0617b26e8b1e9c9d3feace7af8042" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.903355 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.919353 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.924157 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-qlvzm" event={"ID":"f66c168c-985d-43b6-a53d-5613b7a416cc","Type":"ContainerStarted","Data":"d8b4b1bb79b801b813fdd2bedeff3d9647c0a99b6ea949a2b47a7f056986c2f0"} Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.945460 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "2b8bd9a7-9ee4-4597-ac4e-83691d688db5" (UID: "2b8bd9a7-9ee4-4597-ac4e-83691d688db5"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.964042 4948 generic.go:334] "Generic (PLEG): container finished" podID="249e6833-425e-4243-b1ca-6c1b78a752de" containerID="d478d71e2be882fad485d78cde03700f868017416f23b39fe9e63427faa63cde" exitCode=0 Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.964123 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"249e6833-425e-4243-b1ca-6c1b78a752de","Type":"ContainerDied","Data":"d478d71e2be882fad485d78cde03700f868017416f23b39fe9e63427faa63cde"} Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.981155 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-pzp8p" event={"ID":"69739aba-0e18-493d-9957-8b215b4a2eef","Type":"ContainerStarted","Data":"b0c4c89ef8600cc8cabc0c67c87b43a956cda83db560c7c6a4d4c13a84142005"} Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.981197 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-pzp8p" event={"ID":"69739aba-0e18-493d-9957-8b215b4a2eef","Type":"ContainerStarted","Data":"12717de7b0bb57fb36a4f6c8c8a80c56e2c52e7c29015f3c900e13d079b6de02"} Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.998309 4948 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.998360 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:55 crc kubenswrapper[4948]: I0120 20:07:55.998370 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.003516 4948 scope.go:117] "RemoveContainer" containerID="d489e8dd56e6b521defd6b93328af99da8729aaeae03d32ebde333ba8c9321de" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.062777 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-r724g"] Jan 20 20:07:56 crc kubenswrapper[4948]: W0120 20:07:56.107000 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd73c9ec_8283_44a3_8a72_2fc52180b2df.slice/crio-00937459626fea14cb36ecc311da06791bae5856a435276868ee48e10ba2b62d WatchSource:0}: Error finding container 00937459626fea14cb36ecc311da06791bae5856a435276868ee48e10ba2b62d: Status 404 returned error can't find the container with id 00937459626fea14cb36ecc311da06791bae5856a435276868ee48e10ba2b62d Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.116055 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-101b-account-create-update-b8krk"] Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.139954 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7ec1-account-create-update-269qf"] Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.143673 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-pzp8p" podStartSLOduration=3.143651929 podStartE2EDuration="3.143651929s" podCreationTimestamp="2026-01-20 20:07:53 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:56.023747298 +0000 UTC m=+1103.974472267" watchObservedRunningTime="2026-01-20 20:07:56.143651929 +0000 UTC m=+1104.094376898" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.154193 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-config-data" (OuterVolumeSpecName: "config-data") pod "2b8bd9a7-9ee4-4597-ac4e-83691d688db5" (UID: "2b8bd9a7-9ee4-4597-ac4e-83691d688db5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.191692 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-28d2-account-create-update-qsqf8"] Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.210852 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b8bd9a7-9ee4-4597-ac4e-83691d688db5-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.249034 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.280664 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.308399 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.311610 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-logs\") pod \"249e6833-425e-4243-b1ca-6c1b78a752de\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.312572 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-scripts\") pod \"249e6833-425e-4243-b1ca-6c1b78a752de\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.312625 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-httpd-run\") pod \"249e6833-425e-4243-b1ca-6c1b78a752de\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.312656 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"249e6833-425e-4243-b1ca-6c1b78a752de\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.312734 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-combined-ca-bundle\") pod \"249e6833-425e-4243-b1ca-6c1b78a752de\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.313164 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-internal-tls-certs\") pod \"249e6833-425e-4243-b1ca-6c1b78a752de\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.313243 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7qxv\" (UniqueName: \"kubernetes.io/projected/249e6833-425e-4243-b1ca-6c1b78a752de-kube-api-access-t7qxv\") pod \"249e6833-425e-4243-b1ca-6c1b78a752de\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.313297 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-config-data\") pod \"249e6833-425e-4243-b1ca-6c1b78a752de\" (UID: \"249e6833-425e-4243-b1ca-6c1b78a752de\") " Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.316713 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-logs" (OuterVolumeSpecName: "logs") pod "249e6833-425e-4243-b1ca-6c1b78a752de" (UID: "249e6833-425e-4243-b1ca-6c1b78a752de"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.317354 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "249e6833-425e-4243-b1ca-6c1b78a752de" (UID: "249e6833-425e-4243-b1ca-6c1b78a752de"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.327655 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-scripts" (OuterVolumeSpecName: "scripts") pod "249e6833-425e-4243-b1ca-6c1b78a752de" (UID: "249e6833-425e-4243-b1ca-6c1b78a752de"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.338545 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/249e6833-425e-4243-b1ca-6c1b78a752de-kube-api-access-t7qxv" (OuterVolumeSpecName: "kube-api-access-t7qxv") pod "249e6833-425e-4243-b1ca-6c1b78a752de" (UID: "249e6833-425e-4243-b1ca-6c1b78a752de"). InnerVolumeSpecName "kube-api-access-t7qxv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.340059 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "249e6833-425e-4243-b1ca-6c1b78a752de" (UID: "249e6833-425e-4243-b1ca-6c1b78a752de"). InnerVolumeSpecName "local-storage03-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.346501 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:07:56 crc kubenswrapper[4948]: E0120 20:07:56.347076 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" containerName="glance-httpd" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.347145 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" containerName="glance-httpd" Jan 20 20:07:56 crc kubenswrapper[4948]: E0120 20:07:56.347306 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" containerName="glance-log" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.347385 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" containerName="glance-log" Jan 20 20:07:56 crc kubenswrapper[4948]: E0120 20:07:56.347455 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerName="glance-log" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.347505 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerName="glance-log" Jan 20 20:07:56 crc kubenswrapper[4948]: E0120 20:07:56.347570 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerName="glance-httpd" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.347620 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerName="glance-httpd" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.347895 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" containerName="glance-log" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.347970 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" containerName="glance-httpd" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.353492 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerName="glance-log" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.353734 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" containerName="glance-httpd" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.355113 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.364197 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.364441 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.370496 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "249e6833-425e-4243-b1ca-6c1b78a752de" (UID: "249e6833-425e-4243-b1ca-6c1b78a752de"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416139 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416201 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-db8rh\" (UniqueName: \"kubernetes.io/projected/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-kube-api-access-db8rh\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416232 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-scripts\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416268 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-logs\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416297 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416367 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416406 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416430 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-config-data\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0" Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416508 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7qxv\" (UniqueName: \"kubernetes.io/projected/249e6833-425e-4243-b1ca-6c1b78a752de-kube-api-access-t7qxv\") on node \"crc\" DevicePath \"\"" 
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416526 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-logs\") on node \"crc\" DevicePath \"\""
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416535 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416544 4948 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/249e6833-425e-4243-b1ca-6c1b78a752de-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416565 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" "
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.416574 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.421406 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.517685 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527081 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527202 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527235 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-config-data\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527361 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527414 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-db8rh\" (UniqueName: \"kubernetes.io/projected/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-kube-api-access-db8rh\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527449 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-scripts\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527517 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-logs\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527563 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.527852 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.537269 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.537276 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-logs\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.544803 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\""
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.563296 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-db8rh\" (UniqueName: \"kubernetes.io/projected/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-kube-api-access-db8rh\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.565208 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.576389 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.578588 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-config-data\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.582987 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf-scripts\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.603130 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b8bd9a7-9ee4-4597-ac4e-83691d688db5" path="/var/lib/kubelet/pods/2b8bd9a7-9ee4-4597-ac4e-83691d688db5/volumes"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.645796 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "249e6833-425e-4243-b1ca-6c1b78a752de" (UID: "249e6833-425e-4243-b1ca-6c1b78a752de"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.647090 4948 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.678215 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-config-data" (OuterVolumeSpecName: "config-data") pod "249e6833-425e-4243-b1ca-6c1b78a752de" (UID: "249e6833-425e-4243-b1ca-6c1b78a752de"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.714649 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf\") " pod="openstack/glance-default-external-api-0"
Jan 20 20:07:56 crc kubenswrapper[4948]: I0120 20:07:56.748678 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/249e6833-425e-4243-b1ca-6c1b78a752de-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.000040 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.006789 4948 generic.go:334] "Generic (PLEG): container finished" podID="f66c168c-985d-43b6-a53d-5613b7a416cc" containerID="bce482f8eeeb13a5700a2d2b6a3fc1857951c48729aaba23b374e3ce5522de1d" exitCode=0
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.009960 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bf15b74a-2849-4970-87a3-83d7e1b788ba","Type":"ContainerStarted","Data":"762c3f1d12bfae3d69c44524ea0560e780ccd533d88a1448dfd2a6b33d39ce04"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.009995 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-qlvzm" event={"ID":"f66c168c-985d-43b6-a53d-5613b7a416cc","Type":"ContainerDied","Data":"bce482f8eeeb13a5700a2d2b6a3fc1857951c48729aaba23b374e3ce5522de1d"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.013409 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7ec1-account-create-update-269qf" event={"ID":"bd73c9ec-8283-44a3-8a72-2fc52180b2df","Type":"ContainerStarted","Data":"00937459626fea14cb36ecc311da06791bae5856a435276868ee48e10ba2b62d"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.017646 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerStarted","Data":"7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.035494 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"249e6833-425e-4243-b1ca-6c1b78a752de","Type":"ContainerDied","Data":"addc1331ceddb6f7d9a451e3c9646b19f3f21f22acd4b55db3e734991e66ce66"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.035562 4948 scope.go:117] "RemoveContainer" containerID="d478d71e2be882fad485d78cde03700f868017416f23b39fe9e63427faa63cde"
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.035790 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.044224 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-101b-account-create-update-b8krk" event={"ID":"4d91976f-4b13-453d-8ee1-9614f4d23edc","Type":"ContainerStarted","Data":"c45cd038ea8a5c63078f2aa584a1bd1dbbaab6f2921cdf9e910d8a572a4d5f64"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.053415 4948 generic.go:334] "Generic (PLEG): container finished" podID="69739aba-0e18-493d-9957-8b215b4a2eef" containerID="b0c4c89ef8600cc8cabc0c67c87b43a956cda83db560c7c6a4d4c13a84142005" exitCode=0
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.053508 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-pzp8p" event={"ID":"69739aba-0e18-493d-9957-8b215b4a2eef","Type":"ContainerDied","Data":"b0c4c89ef8600cc8cabc0c67c87b43a956cda83db560c7c6a4d4c13a84142005"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.061651 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" event={"ID":"51e4eded-1818-4696-a425-227ce9bb1750","Type":"ContainerStarted","Data":"21f1c76207847407232500f3f092228cd501873534d4becc1a80a841d2f5837e"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.065679 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-r724g" event={"ID":"2c5d2212-ff64-4cb5-964a-0fa269bb0249","Type":"ContainerStarted","Data":"9028da644f8159aa871cf8dd7a1630d4c16ba7e4a389a5d28d40efea735e4ed6"}
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.135959 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.145793 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.156777 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.158681 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.170316 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.170555 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.177986 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.178150 4948 scope.go:117] "RemoveContainer" containerID="634c2dafb4145d1d96a9a997c1c934c0ea1e2c777db8aa62bfdd7bea6edb028a" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.297889 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.298089 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2bhb\" (UniqueName: \"kubernetes.io/projected/2f39439c-442b-407e-9b64-ed1a23e6a97c-kube-api-access-d2bhb\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.298127 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.298199 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f39439c-442b-407e-9b64-ed1a23e6a97c-logs\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.298325 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2f39439c-442b-407e-9b64-ed1a23e6a97c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.298409 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.298467 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " 
pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.298555 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.400621 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2f39439c-442b-407e-9b64-ed1a23e6a97c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.400930 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.400956 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.401229 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.401395 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.401569 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2bhb\" (UniqueName: \"kubernetes.io/projected/2f39439c-442b-407e-9b64-ed1a23e6a97c-kube-api-access-d2bhb\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.401607 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.401687 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f39439c-442b-407e-9b64-ed1a23e6a97c-logs\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc 
kubenswrapper[4948]: I0120 20:07:57.403265 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f39439c-442b-407e-9b64-ed1a23e6a97c-logs\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.408360 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.409297 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2f39439c-442b-407e-9b64-ed1a23e6a97c-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.409605 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.411477 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.412614 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.412902 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f39439c-442b-407e-9b64-ed1a23e6a97c-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.438497 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2bhb\" (UniqueName: \"kubernetes.io/projected/2f39439c-442b-407e-9b64-ed1a23e6a97c-kube-api-access-d2bhb\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.467357 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"2f39439c-442b-407e-9b64-ed1a23e6a97c\") " pod="openstack/glance-default-internal-api-0" Jan 20 20:07:57 crc kubenswrapper[4948]: I0120 20:07:57.551224 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.093538 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" event={"ID":"51e4eded-1818-4696-a425-227ce9bb1750","Type":"ContainerStarted","Data":"08f8ffc93fe751bf13d32f5e10ca0e9ec3390d312d570a3611411ea83a128832"} Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.105362 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-r724g" event={"ID":"2c5d2212-ff64-4cb5-964a-0fa269bb0249","Type":"ContainerStarted","Data":"f842760f17310ee306f18fd6c7dfc7b6c6450b6e940d2118cde72af473823627"} Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.129407 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7ec1-account-create-update-269qf" event={"ID":"bd73c9ec-8283-44a3-8a72-2fc52180b2df","Type":"ContainerStarted","Data":"d6c35c80791bf13765cbe351ab6738d7a45606c31086bc37aee4022510099afa"} Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.131808 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerStarted","Data":"1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29"} Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.135430 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" podStartSLOduration=4.13538197 podStartE2EDuration="4.13538197s" podCreationTimestamp="2026-01-20 20:07:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:58.127118856 +0000 UTC m=+1106.077843825" watchObservedRunningTime="2026-01-20 20:07:58.13538197 +0000 UTC m=+1106.086106939" Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.141713 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-101b-account-create-update-b8krk" event={"ID":"4d91976f-4b13-453d-8ee1-9614f4d23edc","Type":"ContainerStarted","Data":"64bc5b2f28dc731eea9464efc9ec35063f827c5a359f7460c5a50500a4c00e18"} Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.183860 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-r724g" podStartSLOduration=5.18384208 podStartE2EDuration="5.18384208s" podCreationTimestamp="2026-01-20 20:07:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:58.181082792 +0000 UTC m=+1106.131807761" watchObservedRunningTime="2026-01-20 20:07:58.18384208 +0000 UTC m=+1106.134567049" Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.250420 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-101b-account-create-update-b8krk" podStartSLOduration=5.250395672 podStartE2EDuration="5.250395672s" podCreationTimestamp="2026-01-20 20:07:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:58.217515233 +0000 UTC m=+1106.168240202" watchObservedRunningTime="2026-01-20 20:07:58.250395672 +0000 UTC m=+1106.201120641" Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.257809 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-api-7ec1-account-create-update-269qf" podStartSLOduration=5.257789221 podStartE2EDuration="5.257789221s" podCreationTimestamp="2026-01-20 20:07:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:58.254863849 +0000 UTC m=+1106.205588818" watchObservedRunningTime="2026-01-20 20:07:58.257789221 +0000 UTC m=+1106.208514190" Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.258059 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 20 20:07:58 crc kubenswrapper[4948]: W0120 20:07:58.311155 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc35f0ddf_3894_4ab3_bfa1_d55fbc83a4bf.slice/crio-e01eee66059fd38c800ab8a1cbb29f71fb5166db29c5e98cc54343976521469c WatchSource:0}: Error finding container e01eee66059fd38c800ab8a1cbb29f71fb5166db29c5e98cc54343976521469c: Status 404 returned error can't find the container with id e01eee66059fd38c800ab8a1cbb29f71fb5166db29c5e98cc54343976521469c Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.633890 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="249e6833-425e-4243-b1ca-6c1b78a752de" path="/var/lib/kubelet/pods/249e6833-425e-4243-b1ca-6c1b78a752de/volumes" Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.806900 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 20 20:07:58 crc kubenswrapper[4948]: I0120 20:07:58.945505 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.120928 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tntk6\" (UniqueName: \"kubernetes.io/projected/f66c168c-985d-43b6-a53d-5613b7a416cc-kube-api-access-tntk6\") pod \"f66c168c-985d-43b6-a53d-5613b7a416cc\" (UID: \"f66c168c-985d-43b6-a53d-5613b7a416cc\") " Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.121232 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f66c168c-985d-43b6-a53d-5613b7a416cc-operator-scripts\") pod \"f66c168c-985d-43b6-a53d-5613b7a416cc\" (UID: \"f66c168c-985d-43b6-a53d-5613b7a416cc\") " Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.125496 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f66c168c-985d-43b6-a53d-5613b7a416cc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f66c168c-985d-43b6-a53d-5613b7a416cc" (UID: "f66c168c-985d-43b6-a53d-5613b7a416cc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.131273 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f66c168c-985d-43b6-a53d-5613b7a416cc-kube-api-access-tntk6" (OuterVolumeSpecName: "kube-api-access-tntk6") pod "f66c168c-985d-43b6-a53d-5613b7a416cc" (UID: "f66c168c-985d-43b6-a53d-5613b7a416cc"). InnerVolumeSpecName "kube-api-access-tntk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.172493 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.174438 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf","Type":"ContainerStarted","Data":"e01eee66059fd38c800ab8a1cbb29f71fb5166db29c5e98cc54343976521469c"} Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.188280 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bf15b74a-2849-4970-87a3-83d7e1b788ba","Type":"ContainerStarted","Data":"3beb7cf7570f31bf26946659ababe473086c802da70791a2efd952c65ac2b944"} Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.189330 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.219058 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-qlvzm" event={"ID":"f66c168c-985d-43b6-a53d-5613b7a416cc","Type":"ContainerDied","Data":"d8b4b1bb79b801b813fdd2bedeff3d9647c0a99b6ea949a2b47a7f056986c2f0"} Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.219114 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8b4b1bb79b801b813fdd2bedeff3d9647c0a99b6ea949a2b47a7f056986c2f0" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.219138 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-qlvzm" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.225425 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f66c168c-985d-43b6-a53d-5613b7a416cc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.225459 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tntk6\" (UniqueName: \"kubernetes.io/projected/f66c168c-985d-43b6-a53d-5613b7a416cc-kube-api-access-tntk6\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.271527 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.271501547 podStartE2EDuration="7.271501547s" podCreationTimestamp="2026-01-20 20:07:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:07:59.23943458 +0000 UTC m=+1107.190159549" watchObservedRunningTime="2026-01-20 20:07:59.271501547 +0000 UTC m=+1107.222226506" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.284253 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-pzp8p" event={"ID":"69739aba-0e18-493d-9957-8b215b4a2eef","Type":"ContainerDied","Data":"12717de7b0bb57fb36a4f6c8c8a80c56e2c52e7c29015f3c900e13d079b6de02"} Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.284292 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12717de7b0bb57fb36a4f6c8c8a80c56e2c52e7c29015f3c900e13d079b6de02" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.284349 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-pzp8p" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.295270 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2f39439c-442b-407e-9b64-ed1a23e6a97c","Type":"ContainerStarted","Data":"00c99f4e9c8c24a301c14f94f58e20fd8d5673157453c5c90f305d6b673d866f"} Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.320079 4948 generic.go:334] "Generic (PLEG): container finished" podID="2c5d2212-ff64-4cb5-964a-0fa269bb0249" containerID="f842760f17310ee306f18fd6c7dfc7b6c6450b6e940d2118cde72af473823627" exitCode=0 Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.321166 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-r724g" event={"ID":"2c5d2212-ff64-4cb5-964a-0fa269bb0249","Type":"ContainerDied","Data":"f842760f17310ee306f18fd6c7dfc7b6c6450b6e940d2118cde72af473823627"} Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.329626 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69739aba-0e18-493d-9957-8b215b4a2eef-operator-scripts\") pod \"69739aba-0e18-493d-9957-8b215b4a2eef\" (UID: \"69739aba-0e18-493d-9957-8b215b4a2eef\") " Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.329670 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtrrl\" (UniqueName: \"kubernetes.io/projected/69739aba-0e18-493d-9957-8b215b4a2eef-kube-api-access-xtrrl\") pod \"69739aba-0e18-493d-9957-8b215b4a2eef\" (UID: \"69739aba-0e18-493d-9957-8b215b4a2eef\") " Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.330638 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69739aba-0e18-493d-9957-8b215b4a2eef-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "69739aba-0e18-493d-9957-8b215b4a2eef" (UID: "69739aba-0e18-493d-9957-8b215b4a2eef"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.332138 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69739aba-0e18-493d-9957-8b215b4a2eef-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.343521 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69739aba-0e18-493d-9957-8b215b4a2eef-kube-api-access-xtrrl" (OuterVolumeSpecName: "kube-api-access-xtrrl") pod "69739aba-0e18-493d-9957-8b215b4a2eef" (UID: "69739aba-0e18-493d-9957-8b215b4a2eef"). InnerVolumeSpecName "kube-api-access-xtrrl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.352244 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.353833 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-646f4c575-wzbtn" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.439675 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.446567 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtrrl\" (UniqueName: \"kubernetes.io/projected/69739aba-0e18-493d-9957-8b215b4a2eef-kube-api-access-xtrrl\") on node \"crc\" DevicePath \"\"" Jan 20 20:07:59 crc kubenswrapper[4948]: I0120 20:07:59.546207 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Jan 20 20:08:00 crc kubenswrapper[4948]: I0120 20:08:00.380198 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf","Type":"ContainerStarted","Data":"3cd675b1356429f192651ce42821fd81dc8763de5cfd46f61af3590b94a4e2dc"} Jan 20 20:08:00 crc kubenswrapper[4948]: I0120 20:08:00.408552 4948 generic.go:334] "Generic (PLEG): container finished" podID="bd73c9ec-8283-44a3-8a72-2fc52180b2df" containerID="d6c35c80791bf13765cbe351ab6738d7a45606c31086bc37aee4022510099afa" exitCode=0 Jan 20 20:08:00 crc kubenswrapper[4948]: I0120 20:08:00.408767 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7ec1-account-create-update-269qf" event={"ID":"bd73c9ec-8283-44a3-8a72-2fc52180b2df","Type":"ContainerDied","Data":"d6c35c80791bf13765cbe351ab6738d7a45606c31086bc37aee4022510099afa"} Jan 20 20:08:00 crc kubenswrapper[4948]: I0120 20:08:00.418145 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerStarted","Data":"218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870"} Jan 20 20:08:00 crc kubenswrapper[4948]: I0120 20:08:00.421105 4948 generic.go:334] "Generic (PLEG): container finished" podID="4d91976f-4b13-453d-8ee1-9614f4d23edc" containerID="64bc5b2f28dc731eea9464efc9ec35063f827c5a359f7460c5a50500a4c00e18" exitCode=0 Jan 20 20:08:00 crc kubenswrapper[4948]: I0120 20:08:00.421168 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-101b-account-create-update-b8krk" event={"ID":"4d91976f-4b13-453d-8ee1-9614f4d23edc","Type":"ContainerDied","Data":"64bc5b2f28dc731eea9464efc9ec35063f827c5a359f7460c5a50500a4c00e18"} Jan 20 20:08:00 crc kubenswrapper[4948]: I0120 20:08:00.436415 4948 generic.go:334] "Generic (PLEG): container finished" podID="51e4eded-1818-4696-a425-227ce9bb1750" containerID="08f8ffc93fe751bf13d32f5e10ca0e9ec3390d312d570a3611411ea83a128832" exitCode=0 Jan 20 20:08:00 crc 
kubenswrapper[4948]: I0120 20:08:00.436534 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" event={"ID":"51e4eded-1818-4696-a425-227ce9bb1750","Type":"ContainerDied","Data":"08f8ffc93fe751bf13d32f5e10ca0e9ec3390d312d570a3611411ea83a128832"} Jan 20 20:08:00 crc kubenswrapper[4948]: I0120 20:08:00.446170 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2f39439c-442b-407e-9b64-ed1a23e6a97c","Type":"ContainerStarted","Data":"6035b014cfd37e6a8879f8911daadf8bd8140f0579b206f1e5e17a83dd15f3dd"} Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.018225 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.124637 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c5d2212-ff64-4cb5-964a-0fa269bb0249-operator-scripts\") pod \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\" (UID: \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\") " Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.124829 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdt77\" (UniqueName: \"kubernetes.io/projected/2c5d2212-ff64-4cb5-964a-0fa269bb0249-kube-api-access-gdt77\") pod \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\" (UID: \"2c5d2212-ff64-4cb5-964a-0fa269bb0249\") " Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.125566 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c5d2212-ff64-4cb5-964a-0fa269bb0249-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2c5d2212-ff64-4cb5-964a-0fa269bb0249" (UID: "2c5d2212-ff64-4cb5-964a-0fa269bb0249"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.133888 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c5d2212-ff64-4cb5-964a-0fa269bb0249-kube-api-access-gdt77" (OuterVolumeSpecName: "kube-api-access-gdt77") pod "2c5d2212-ff64-4cb5-964a-0fa269bb0249" (UID: "2c5d2212-ff64-4cb5-964a-0fa269bb0249"). InnerVolumeSpecName "kube-api-access-gdt77". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.227732 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c5d2212-ff64-4cb5-964a-0fa269bb0249-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.227765 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdt77\" (UniqueName: \"kubernetes.io/projected/2c5d2212-ff64-4cb5-964a-0fa269bb0249-kube-api-access-gdt77\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.458255 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-r724g" Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.458256 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-r724g" event={"ID":"2c5d2212-ff64-4cb5-964a-0fa269bb0249","Type":"ContainerDied","Data":"9028da644f8159aa871cf8dd7a1630d4c16ba7e4a389a5d28d40efea735e4ed6"} Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.458777 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9028da644f8159aa871cf8dd7a1630d4c16ba7e4a389a5d28d40efea735e4ed6" Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.464549 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf","Type":"ContainerStarted","Data":"f83e0623c2c8c928b2e015bbd42e11f56031c0c11739168479a9b2307cedc6cf"} Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.946150 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:08:01 crc kubenswrapper[4948]: I0120 20:08:01.982253 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.98222727 podStartE2EDuration="5.98222727s" podCreationTimestamp="2026-01-20 20:07:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:08:01.493267783 +0000 UTC m=+1109.443992752" watchObservedRunningTime="2026-01-20 20:08:01.98222727 +0000 UTC m=+1109.932952239" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.059373 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5l8r\" (UniqueName: \"kubernetes.io/projected/bd73c9ec-8283-44a3-8a72-2fc52180b2df-kube-api-access-m5l8r\") pod \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\" (UID: \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\") " Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.059642 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd73c9ec-8283-44a3-8a72-2fc52180b2df-operator-scripts\") pod \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\" (UID: \"bd73c9ec-8283-44a3-8a72-2fc52180b2df\") " Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.061025 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd73c9ec-8283-44a3-8a72-2fc52180b2df-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd73c9ec-8283-44a3-8a72-2fc52180b2df" (UID: "bd73c9ec-8283-44a3-8a72-2fc52180b2df"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.074055 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd73c9ec-8283-44a3-8a72-2fc52180b2df-kube-api-access-m5l8r" (OuterVolumeSpecName: "kube-api-access-m5l8r") pod "bd73c9ec-8283-44a3-8a72-2fc52180b2df" (UID: "bd73c9ec-8283-44a3-8a72-2fc52180b2df"). InnerVolumeSpecName "kube-api-access-m5l8r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.161996 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5l8r\" (UniqueName: \"kubernetes.io/projected/bd73c9ec-8283-44a3-8a72-2fc52180b2df-kube-api-access-m5l8r\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.162032 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd73c9ec-8283-44a3-8a72-2fc52180b2df-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.260306 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.271690 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.365033 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2xh2\" (UniqueName: \"kubernetes.io/projected/51e4eded-1818-4696-a425-227ce9bb1750-kube-api-access-g2xh2\") pod \"51e4eded-1818-4696-a425-227ce9bb1750\" (UID: \"51e4eded-1818-4696-a425-227ce9bb1750\") " Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.365234 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51e4eded-1818-4696-a425-227ce9bb1750-operator-scripts\") pod \"51e4eded-1818-4696-a425-227ce9bb1750\" (UID: \"51e4eded-1818-4696-a425-227ce9bb1750\") " Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.367330 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51e4eded-1818-4696-a425-227ce9bb1750-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "51e4eded-1818-4696-a425-227ce9bb1750" (UID: "51e4eded-1818-4696-a425-227ce9bb1750"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.372884 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51e4eded-1818-4696-a425-227ce9bb1750-kube-api-access-g2xh2" (OuterVolumeSpecName: "kube-api-access-g2xh2") pod "51e4eded-1818-4696-a425-227ce9bb1750" (UID: "51e4eded-1818-4696-a425-227ce9bb1750"). InnerVolumeSpecName "kube-api-access-g2xh2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.467137 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d91976f-4b13-453d-8ee1-9614f4d23edc-operator-scripts\") pod \"4d91976f-4b13-453d-8ee1-9614f4d23edc\" (UID: \"4d91976f-4b13-453d-8ee1-9614f4d23edc\") " Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.468065 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twwns\" (UniqueName: \"kubernetes.io/projected/4d91976f-4b13-453d-8ee1-9614f4d23edc-kube-api-access-twwns\") pod \"4d91976f-4b13-453d-8ee1-9614f4d23edc\" (UID: \"4d91976f-4b13-453d-8ee1-9614f4d23edc\") " Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.468671 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51e4eded-1818-4696-a425-227ce9bb1750-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.468807 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2xh2\" (UniqueName: \"kubernetes.io/projected/51e4eded-1818-4696-a425-227ce9bb1750-kube-api-access-g2xh2\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.468801 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d91976f-4b13-453d-8ee1-9614f4d23edc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4d91976f-4b13-453d-8ee1-9614f4d23edc" (UID: "4d91976f-4b13-453d-8ee1-9614f4d23edc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.485969 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d91976f-4b13-453d-8ee1-9614f4d23edc-kube-api-access-twwns" (OuterVolumeSpecName: "kube-api-access-twwns") pod "4d91976f-4b13-453d-8ee1-9614f4d23edc" (UID: "4d91976f-4b13-453d-8ee1-9614f4d23edc"). InnerVolumeSpecName "kube-api-access-twwns". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.511582 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-101b-account-create-update-b8krk" event={"ID":"4d91976f-4b13-453d-8ee1-9614f4d23edc","Type":"ContainerDied","Data":"c45cd038ea8a5c63078f2aa584a1bd1dbbaab6f2921cdf9e910d8a572a4d5f64"} Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.511820 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c45cd038ea8a5c63078f2aa584a1bd1dbbaab6f2921cdf9e910d8a572a4d5f64" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.511852 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-101b-account-create-update-b8krk" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.514506 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" event={"ID":"51e4eded-1818-4696-a425-227ce9bb1750","Type":"ContainerDied","Data":"21f1c76207847407232500f3f092228cd501873534d4becc1a80a841d2f5837e"} Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.514549 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="21f1c76207847407232500f3f092228cd501873534d4becc1a80a841d2f5837e" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.514569 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-28d2-account-create-update-qsqf8" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.516399 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2f39439c-442b-407e-9b64-ed1a23e6a97c","Type":"ContainerStarted","Data":"06be435affcc3fb271197d9488bc785058e330f77c82ef46681fe9feff29e43f"} Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.518638 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7ec1-account-create-update-269qf" event={"ID":"bd73c9ec-8283-44a3-8a72-2fc52180b2df","Type":"ContainerDied","Data":"00937459626fea14cb36ecc311da06791bae5856a435276868ee48e10ba2b62d"} Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.518666 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00937459626fea14cb36ecc311da06791bae5856a435276868ee48e10ba2b62d" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.518770 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7ec1-account-create-update-269qf" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.544481 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="ceilometer-central-agent" containerID="cri-o://7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d" gracePeriod=30 Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.544876 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerStarted","Data":"ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c"} Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.544921 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.546255 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="proxy-httpd" containerID="cri-o://ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c" gracePeriod=30 Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.546437 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="ceilometer-notification-agent" containerID="cri-o://1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29" gracePeriod=30 Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.547537 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="sg-core" containerID="cri-o://218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870" gracePeriod=30 Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.613897 4948 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d91976f-4b13-453d-8ee1-9614f4d23edc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.613942 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twwns\" (UniqueName: \"kubernetes.io/projected/4d91976f-4b13-453d-8ee1-9614f4d23edc-kube-api-access-twwns\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.634539 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.634508336 podStartE2EDuration="5.634508336s" podCreationTimestamp="2026-01-20 20:07:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:08:02.540500607 +0000 UTC m=+1110.491225576" watchObservedRunningTime="2026-01-20 20:08:02.634508336 +0000 UTC m=+1110.585233305" Jan 20 20:08:02 crc kubenswrapper[4948]: I0120 20:08:02.693010 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.936654965 podStartE2EDuration="10.692989289s" podCreationTimestamp="2026-01-20 20:07:52 +0000 UTC" firstStartedPulling="2026-01-20 20:07:54.51309588 +0000 UTC m=+1102.463820849" lastFinishedPulling="2026-01-20 20:08:01.269430204 +0000 UTC m=+1109.220155173" observedRunningTime="2026-01-20 
20:08:02.655402786 +0000 UTC m=+1110.606127745" watchObservedRunningTime="2026-01-20 20:08:02.692989289 +0000 UTC m=+1110.643714258" Jan 20 20:08:03 crc kubenswrapper[4948]: I0120 20:08:03.562436 4948 generic.go:334] "Generic (PLEG): container finished" podID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerID="ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c" exitCode=0 Jan 20 20:08:03 crc kubenswrapper[4948]: I0120 20:08:03.562694 4948 generic.go:334] "Generic (PLEG): container finished" podID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerID="218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870" exitCode=2 Jan 20 20:08:03 crc kubenswrapper[4948]: I0120 20:08:03.562511 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerDied","Data":"ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c"} Jan 20 20:08:03 crc kubenswrapper[4948]: I0120 20:08:03.562749 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerDied","Data":"218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870"} Jan 20 20:08:03 crc kubenswrapper[4948]: I0120 20:08:03.562764 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerDied","Data":"1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29"} Jan 20 20:08:03 crc kubenswrapper[4948]: I0120 20:08:03.562716 4948 generic.go:334] "Generic (PLEG): container finished" podID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerID="1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29" exitCode=0 Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266120 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xpn28"] Jan 20 20:08:04 crc kubenswrapper[4948]: E0120 20:08:04.266541 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51e4eded-1818-4696-a425-227ce9bb1750" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266558 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="51e4eded-1818-4696-a425-227ce9bb1750" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: E0120 20:08:04.266577 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69739aba-0e18-493d-9957-8b215b4a2eef" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266583 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="69739aba-0e18-493d-9957-8b215b4a2eef" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: E0120 20:08:04.266595 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c5d2212-ff64-4cb5-964a-0fa269bb0249" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266601 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c5d2212-ff64-4cb5-964a-0fa269bb0249" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: E0120 20:08:04.266609 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd73c9ec-8283-44a3-8a72-2fc52180b2df" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266614 4948 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="bd73c9ec-8283-44a3-8a72-2fc52180b2df" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: E0120 20:08:04.266623 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f66c168c-985d-43b6-a53d-5613b7a416cc" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266630 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f66c168c-985d-43b6-a53d-5613b7a416cc" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: E0120 20:08:04.266639 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d91976f-4b13-453d-8ee1-9614f4d23edc" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266645 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d91976f-4b13-453d-8ee1-9614f4d23edc" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266865 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f66c168c-985d-43b6-a53d-5613b7a416cc" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266882 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="51e4eded-1818-4696-a425-227ce9bb1750" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266896 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c5d2212-ff64-4cb5-964a-0fa269bb0249" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266907 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd73c9ec-8283-44a3-8a72-2fc52180b2df" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266919 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="69739aba-0e18-493d-9957-8b215b4a2eef" containerName="mariadb-database-create" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.266929 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d91976f-4b13-453d-8ee1-9614f4d23edc" containerName="mariadb-account-create-update" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.267537 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.270895 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.338788 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-bgvbx" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.339098 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.356565 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6tzz\" (UniqueName: \"kubernetes.io/projected/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-kube-api-access-q6tzz\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.356635 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-config-data\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.356679 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.356806 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-scripts\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.383762 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xpn28"] Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.458944 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-scripts\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.459093 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6tzz\" (UniqueName: \"kubernetes.io/projected/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-kube-api-access-q6tzz\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.459124 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-config-data\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: 
\"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.459160 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.465248 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-config-data\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.479066 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.483645 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-scripts\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.485249 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6tzz\" (UniqueName: \"kubernetes.io/projected/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-kube-api-access-q6tzz\") pod \"nova-cell0-conductor-db-sync-xpn28\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") " pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: I0120 20:08:04.702389 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-xpn28" Jan 20 20:08:04 crc kubenswrapper[4948]: E0120 20:08:04.876842 4948 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda31cdb9_d009_48a3_92f0_5e0102d0096a.slice/crio-conmon-7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d.scope\": RecentStats: unable to find data in memory cache]" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.199621 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.281049 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-scripts\") pod \"da31cdb9-d009-48a3-92f0-5e0102d0096a\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.281206 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-log-httpd\") pod \"da31cdb9-d009-48a3-92f0-5e0102d0096a\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.281315 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-config-data\") pod \"da31cdb9-d009-48a3-92f0-5e0102d0096a\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.281380 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-sg-core-conf-yaml\") pod \"da31cdb9-d009-48a3-92f0-5e0102d0096a\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.281447 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-run-httpd\") pod \"da31cdb9-d009-48a3-92f0-5e0102d0096a\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.282238 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "da31cdb9-d009-48a3-92f0-5e0102d0096a" (UID: "da31cdb9-d009-48a3-92f0-5e0102d0096a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.282914 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "da31cdb9-d009-48a3-92f0-5e0102d0096a" (UID: "da31cdb9-d009-48a3-92f0-5e0102d0096a"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.282958 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hgtbt\" (UniqueName: \"kubernetes.io/projected/da31cdb9-d009-48a3-92f0-5e0102d0096a-kube-api-access-hgtbt\") pod \"da31cdb9-d009-48a3-92f0-5e0102d0096a\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.283019 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-combined-ca-bundle\") pod \"da31cdb9-d009-48a3-92f0-5e0102d0096a\" (UID: \"da31cdb9-d009-48a3-92f0-5e0102d0096a\") " Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.283512 4948 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.293826 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da31cdb9-d009-48a3-92f0-5e0102d0096a-kube-api-access-hgtbt" (OuterVolumeSpecName: "kube-api-access-hgtbt") pod "da31cdb9-d009-48a3-92f0-5e0102d0096a" (UID: "da31cdb9-d009-48a3-92f0-5e0102d0096a"). InnerVolumeSpecName "kube-api-access-hgtbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.331962 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-scripts" (OuterVolumeSpecName: "scripts") pod "da31cdb9-d009-48a3-92f0-5e0102d0096a" (UID: "da31cdb9-d009-48a3-92f0-5e0102d0096a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.388470 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da31cdb9-d009-48a3-92f0-5e0102d0096a" (UID: "da31cdb9-d009-48a3-92f0-5e0102d0096a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.389755 4948 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/da31cdb9-d009-48a3-92f0-5e0102d0096a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.389788 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hgtbt\" (UniqueName: \"kubernetes.io/projected/da31cdb9-d009-48a3-92f0-5e0102d0096a-kube-api-access-hgtbt\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.389802 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.389815 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.389986 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "da31cdb9-d009-48a3-92f0-5e0102d0096a" (UID: "da31cdb9-d009-48a3-92f0-5e0102d0096a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.445843 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-config-data" (OuterVolumeSpecName: "config-data") pod "da31cdb9-d009-48a3-92f0-5e0102d0096a" (UID: "da31cdb9-d009-48a3-92f0-5e0102d0096a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.451580 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xpn28"] Jan 20 20:08:05 crc kubenswrapper[4948]: W0120 20:08:05.470360 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6bba308_c57f_4e3a_a2d8_1efb3f1d1844.slice/crio-22eb83cc604a9a3b2d45cd5762e6e152e09fc1da9904165c3412b0e58c51da5b WatchSource:0}: Error finding container 22eb83cc604a9a3b2d45cd5762e6e152e09fc1da9904165c3412b0e58c51da5b: Status 404 returned error can't find the container with id 22eb83cc604a9a3b2d45cd5762e6e152e09fc1da9904165c3412b0e58c51da5b Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.494912 4948 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.494960 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da31cdb9-d009-48a3-92f0-5e0102d0096a-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.582403 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-xpn28" event={"ID":"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844","Type":"ContainerStarted","Data":"22eb83cc604a9a3b2d45cd5762e6e152e09fc1da9904165c3412b0e58c51da5b"} Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.586450 4948 generic.go:334] "Generic (PLEG): container finished" podID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerID="7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d" exitCode=0 Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.586494 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerDied","Data":"7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d"} Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.586533 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"da31cdb9-d009-48a3-92f0-5e0102d0096a","Type":"ContainerDied","Data":"e3a75f21d53be0836036029a88478d5fac3c9d0aa06b01461a48dd3fcaa51725"} Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.586545 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.586592 4948 scope.go:117] "RemoveContainer" containerID="ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.615171 4948 scope.go:117] "RemoveContainer" containerID="218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.635779 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.646030 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.673349 4948 scope.go:117] "RemoveContainer" containerID="1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.673518 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:05 crc kubenswrapper[4948]: E0120 20:08:05.673998 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="ceilometer-central-agent" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.674016 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="ceilometer-central-agent" Jan 20 20:08:05 crc kubenswrapper[4948]: E0120 20:08:05.674045 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="ceilometer-notification-agent" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.674052 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="ceilometer-notification-agent" Jan 20 20:08:05 crc kubenswrapper[4948]: E0120 20:08:05.674062 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="proxy-httpd" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.674067 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="proxy-httpd" Jan 20 20:08:05 crc kubenswrapper[4948]: E0120 20:08:05.674075 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="sg-core" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.674081 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="sg-core" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.674231 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="sg-core" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.674249 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="ceilometer-notification-agent" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.674262 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="proxy-httpd" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.674276 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" containerName="ceilometer-central-agent" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.676115 4948 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.681637 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.681807 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.712548 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.761081 4948 scope.go:117] "RemoveContainer" containerID="7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.789293 4948 scope.go:117] "RemoveContainer" containerID="ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c" Jan 20 20:08:05 crc kubenswrapper[4948]: E0120 20:08:05.791143 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c\": container with ID starting with ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c not found: ID does not exist" containerID="ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.791192 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c"} err="failed to get container status \"ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c\": rpc error: code = NotFound desc = could not find container \"ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c\": container with ID starting with ebb4b662b3952aeb525ec8d4569a9d2ea8b3b73a6a0bd6957565b2de7c59931c not found: ID does not exist" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.791250 4948 scope.go:117] "RemoveContainer" containerID="218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870" Jan 20 20:08:05 crc kubenswrapper[4948]: E0120 20:08:05.796175 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870\": container with ID starting with 218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870 not found: ID does not exist" containerID="218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.796251 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870"} err="failed to get container status \"218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870\": rpc error: code = NotFound desc = could not find container \"218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870\": container with ID starting with 218374ea8842c8339baba965f497c2ce6e53074648cb2fb2567f41c379da6870 not found: ID does not exist" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.796313 4948 scope.go:117] "RemoveContainer" containerID="1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29" Jan 20 20:08:05 crc kubenswrapper[4948]: E0120 20:08:05.799046 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound 
desc = could not find container \"1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29\": container with ID starting with 1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29 not found: ID does not exist" containerID="1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.799082 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29"} err="failed to get container status \"1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29\": rpc error: code = NotFound desc = could not find container \"1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29\": container with ID starting with 1da57f3232d4d2fd228a111bd4c8fce4512ef9a5a5d23f55f48e57553f348c29 not found: ID does not exist" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.799110 4948 scope.go:117] "RemoveContainer" containerID="7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d" Jan 20 20:08:05 crc kubenswrapper[4948]: E0120 20:08:05.799737 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d\": container with ID starting with 7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d not found: ID does not exist" containerID="7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.799767 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d"} err="failed to get container status \"7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d\": rpc error: code = NotFound desc = could not find container \"7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d\": container with ID starting with 7122d57c0abef097ccdcec19ba80797a2da73169144f03729e8cb220a6d4b75d not found: ID does not exist" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.834808 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-config-data\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.834850 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-scripts\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.834877 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-run-httpd\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.834926 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m6m5\" (UniqueName: \"kubernetes.io/projected/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-kube-api-access-7m6m5\") pod \"ceilometer-0\" (UID: 
\"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.834955 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.834974 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.834990 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-log-httpd\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.972453 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.972514 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-log-httpd\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.972661 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-config-data\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.972689 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-scripts\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.972741 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-run-httpd\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.972776 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m6m5\" (UniqueName: \"kubernetes.io/projected/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-kube-api-access-7m6m5\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.972807 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.975542 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-run-httpd\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.975891 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-log-httpd\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.984248 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.984321 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-scripts\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.986511 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:05 crc kubenswrapper[4948]: I0120 20:08:05.993787 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-config-data\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:06 crc kubenswrapper[4948]: I0120 20:08:06.011037 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m6m5\" (UniqueName: \"kubernetes.io/projected/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-kube-api-access-7m6m5\") pod \"ceilometer-0\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " pod="openstack/ceilometer-0" Jan 20 20:08:06 crc kubenswrapper[4948]: I0120 20:08:06.048920 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:06 crc kubenswrapper[4948]: I0120 20:08:06.389351 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:06 crc kubenswrapper[4948]: I0120 20:08:06.586194 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da31cdb9-d009-48a3-92f0-5e0102d0096a" path="/var/lib/kubelet/pods/da31cdb9-d009-48a3-92f0-5e0102d0096a/volumes" Jan 20 20:08:06 crc kubenswrapper[4948]: I0120 20:08:06.618541 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d1222f27-af2a-46fd-a296-37bdb8db4486","Type":"ContainerStarted","Data":"3c9341546e94b37bf429c8cf0199eb3a4f870bf8b2e8e1ba93610fd3da3c759a"} Jan 20 20:08:06 crc kubenswrapper[4948]: I0120 20:08:06.641516 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerStarted","Data":"b1b4078799eba288b25b9f686d3a1646945d4b91c633af4627346be08d05cc6f"} Jan 20 20:08:06 crc kubenswrapper[4948]: I0120 20:08:06.650915 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.289746499 podStartE2EDuration="35.65088847s" podCreationTimestamp="2026-01-20 20:07:31 +0000 UTC" firstStartedPulling="2026-01-20 20:07:32.843864602 +0000 UTC m=+1080.794589561" lastFinishedPulling="2026-01-20 20:08:05.205006563 +0000 UTC m=+1113.155731532" observedRunningTime="2026-01-20 20:08:06.639345843 +0000 UTC m=+1114.590070822" watchObservedRunningTime="2026-01-20 20:08:06.65088847 +0000 UTC m=+1114.601613439" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.000894 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.001149 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.081117 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.149161 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.667557 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.668919 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.696525 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerStarted","Data":"a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0"} Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.696580 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.696834 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.745049 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/glance-default-internal-api-0" Jan 20 20:08:07 crc kubenswrapper[4948]: I0120 20:08:07.745238 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 20 20:08:08 crc kubenswrapper[4948]: I0120 20:08:08.163392 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 20 20:08:08 crc kubenswrapper[4948]: I0120 20:08:08.714807 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerStarted","Data":"5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49"} Jan 20 20:08:08 crc kubenswrapper[4948]: I0120 20:08:08.716161 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 20 20:08:08 crc kubenswrapper[4948]: I0120 20:08:08.716184 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.395921 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.396249 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.397042 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"3a23ab38989e7c7f201254011c0807c65fcca348eb7fda45253cf536df81d13d"} pod="openstack/horizon-67dd67cb9b-9w4wk" containerMessage="Container horizon failed startup probe, will be restarted" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.397072 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" containerID="cri-o://3a23ab38989e7c7f201254011c0807c65fcca348eb7fda45253cf536df81d13d" gracePeriod=30 Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.542108 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.542182 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.542892 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"f5337fdeea822defb3bda066c6a194da1d66af7fc4c86187fb510469631f72ad"} pod="openstack/horizon-68bc7c4fc6-4mkmv" containerMessage="Container horizon failed startup probe, will be restarted" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.542926 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" 
containerName="horizon" containerID="cri-o://f5337fdeea822defb3bda066c6a194da1d66af7fc4c86187fb510469631f72ad" gracePeriod=30 Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.729664 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.729695 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:08:09 crc kubenswrapper[4948]: I0120 20:08:09.730754 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerStarted","Data":"1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5"} Jan 20 20:08:10 crc kubenswrapper[4948]: I0120 20:08:10.299938 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="bf15b74a-2849-4970-87a3-83d7e1b788ba" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.170:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:08:10 crc kubenswrapper[4948]: I0120 20:08:10.737773 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:08:10 crc kubenswrapper[4948]: I0120 20:08:10.737797 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:08:11 crc kubenswrapper[4948]: I0120 20:08:11.765954 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerStarted","Data":"4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7"} Jan 20 20:08:11 crc kubenswrapper[4948]: I0120 20:08:11.766648 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 20:08:11 crc kubenswrapper[4948]: I0120 20:08:11.796560 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.249675781 podStartE2EDuration="6.796536507s" podCreationTimestamp="2026-01-20 20:08:05 +0000 UTC" firstStartedPulling="2026-01-20 20:08:06.412489638 +0000 UTC m=+1114.363214607" lastFinishedPulling="2026-01-20 20:08:10.959350364 +0000 UTC m=+1118.910075333" observedRunningTime="2026-01-20 20:08:11.79028018 +0000 UTC m=+1119.741005169" watchObservedRunningTime="2026-01-20 20:08:11.796536507 +0000 UTC m=+1119.747261476" Jan 20 20:08:13 crc kubenswrapper[4948]: I0120 20:08:13.762521 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 20 20:08:13 crc kubenswrapper[4948]: I0120 20:08:13.762964 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:08:13 crc kubenswrapper[4948]: I0120 20:08:13.799850 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 20 20:08:13 crc kubenswrapper[4948]: I0120 20:08:13.800080 4948 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 20 20:08:13 crc kubenswrapper[4948]: I0120 20:08:13.801061 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 20 20:08:13 crc kubenswrapper[4948]: I0120 20:08:13.808076 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 20 20:08:20 crc kubenswrapper[4948]: I0120 20:08:20.250122 4948 patch_prober.go:28] interesting 
pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:08:20 crc kubenswrapper[4948]: I0120 20:08:20.250611 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:08:20 crc kubenswrapper[4948]: I0120 20:08:20.250680 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:08:20 crc kubenswrapper[4948]: I0120 20:08:20.251411 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a26c04565cc618f3f275d4a90dd01432ac1f9fe490efd0919ef900cbd2cc4e1c"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:08:20 crc kubenswrapper[4948]: I0120 20:08:20.251466 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://a26c04565cc618f3f275d4a90dd01432ac1f9fe490efd0919ef900cbd2cc4e1c" gracePeriod=600 Jan 20 20:08:20 crc kubenswrapper[4948]: I0120 20:08:20.914456 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="a26c04565cc618f3f275d4a90dd01432ac1f9fe490efd0919ef900cbd2cc4e1c" exitCode=0 Jan 20 20:08:20 crc kubenswrapper[4948]: I0120 20:08:20.914521 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"a26c04565cc618f3f275d4a90dd01432ac1f9fe490efd0919ef900cbd2cc4e1c"} Jan 20 20:08:20 crc kubenswrapper[4948]: I0120 20:08:20.914562 4948 scope.go:117] "RemoveContainer" containerID="8ea9bb8d6d2b455140d4d17b9b3ddbc16caa6ff50e9a5f66da80be0038f97979" Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.255780 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.262204 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="ceilometer-central-agent" containerID="cri-o://a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0" gracePeriod=30 Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.262253 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="sg-core" containerID="cri-o://1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5" gracePeriod=30 Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.262257 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="ceilometer-notification-agent" 
containerID="cri-o://5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49" gracePeriod=30 Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.262422 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="proxy-httpd" containerID="cri-o://4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7" gracePeriod=30 Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.278854 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.952535 4948 generic.go:334] "Generic (PLEG): container finished" podID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerID="4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7" exitCode=0 Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.952601 4948 generic.go:334] "Generic (PLEG): container finished" podID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerID="1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5" exitCode=2 Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.952610 4948 generic.go:334] "Generic (PLEG): container finished" podID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerID="5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49" exitCode=0 Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.952642 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerDied","Data":"4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7"} Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.952715 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerDied","Data":"1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5"} Jan 20 20:08:22 crc kubenswrapper[4948]: I0120 20:08:22.952726 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerDied","Data":"5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49"} Jan 20 20:08:25 crc kubenswrapper[4948]: E0120 20:08:25.886318 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified" Jan 20 20:08:25 crc kubenswrapper[4948]: E0120 20:08:25.888614 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nova-cell0-conductor-db-sync,Image:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CELL_NAME,Value:cell0,ValueFrom:nil,},EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:false,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/kolla/config_files/config.json,SubPath:nova-conductor-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-q6tzz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42436,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-cell0-conductor-db-sync-xpn28_openstack(b6bba308-c57f-4e3a-a2d8-1efb3f1d1844): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:08:25 crc kubenswrapper[4948]: E0120 20:08:25.889797 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/nova-cell0-conductor-db-sync-xpn28" podUID="b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" Jan 20 20:08:26 crc kubenswrapper[4948]: E0120 20:08:26.000364 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified\\\"\"" pod="openstack/nova-cell0-conductor-db-sync-xpn28" podUID="b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" Jan 20 20:08:27 crc kubenswrapper[4948]: I0120 20:08:27.024464 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"7f6e2109b164e1a5b2cd57afe834ac3fbe85f27835236a7bebdf71bc6a9761ad"} Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.589615 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.757988 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-scripts\") pod \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.758022 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-config-data\") pod \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.758048 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-sg-core-conf-yaml\") pod \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.758842 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-run-httpd\") pod \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.759008 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-log-httpd\") pod \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.759044 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-combined-ca-bundle\") pod \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.759144 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m6m5\" (UniqueName: \"kubernetes.io/projected/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-kube-api-access-7m6m5\") pod \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\" (UID: \"7f0fe21e-39ad-4b67-a735-43c5c67d99fc\") " Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.759421 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7f0fe21e-39ad-4b67-a735-43c5c67d99fc" (UID: "7f0fe21e-39ad-4b67-a735-43c5c67d99fc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.759640 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7f0fe21e-39ad-4b67-a735-43c5c67d99fc" (UID: "7f0fe21e-39ad-4b67-a735-43c5c67d99fc"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.759935 4948 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.759954 4948 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.767466 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-kube-api-access-7m6m5" (OuterVolumeSpecName: "kube-api-access-7m6m5") pod "7f0fe21e-39ad-4b67-a735-43c5c67d99fc" (UID: "7f0fe21e-39ad-4b67-a735-43c5c67d99fc"). InnerVolumeSpecName "kube-api-access-7m6m5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.776934 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-scripts" (OuterVolumeSpecName: "scripts") pod "7f0fe21e-39ad-4b67-a735-43c5c67d99fc" (UID: "7f0fe21e-39ad-4b67-a735-43c5c67d99fc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.844357 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7f0fe21e-39ad-4b67-a735-43c5c67d99fc" (UID: "7f0fe21e-39ad-4b67-a735-43c5c67d99fc"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.862505 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7m6m5\" (UniqueName: \"kubernetes.io/projected/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-kube-api-access-7m6m5\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.862547 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.862561 4948 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.896271 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7f0fe21e-39ad-4b67-a735-43c5c67d99fc" (UID: "7f0fe21e-39ad-4b67-a735-43c5c67d99fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.905968 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-config-data" (OuterVolumeSpecName: "config-data") pod "7f0fe21e-39ad-4b67-a735-43c5c67d99fc" (UID: "7f0fe21e-39ad-4b67-a735-43c5c67d99fc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.964884 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:28 crc kubenswrapper[4948]: I0120 20:08:28.964924 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f0fe21e-39ad-4b67-a735-43c5c67d99fc-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.045546 4948 generic.go:334] "Generic (PLEG): container finished" podID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerID="a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0" exitCode=0 Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.045608 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerDied","Data":"a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0"} Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.045660 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7f0fe21e-39ad-4b67-a735-43c5c67d99fc","Type":"ContainerDied","Data":"b1b4078799eba288b25b9f686d3a1646945d4b91c633af4627346be08d05cc6f"} Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.045683 4948 scope.go:117] "RemoveContainer" containerID="4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.045893 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.079052 4948 scope.go:117] "RemoveContainer" containerID="1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.097285 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.106826 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.148309 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:29 crc kubenswrapper[4948]: E0120 20:08:29.148817 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="ceilometer-notification-agent" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.148843 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="ceilometer-notification-agent" Jan 20 20:08:29 crc kubenswrapper[4948]: E0120 20:08:29.148885 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="sg-core" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.148893 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="sg-core" Jan 20 20:08:29 crc kubenswrapper[4948]: E0120 20:08:29.148913 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="proxy-httpd" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.148920 4948 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="proxy-httpd" Jan 20 20:08:29 crc kubenswrapper[4948]: E0120 20:08:29.148935 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="ceilometer-central-agent" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.148944 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="ceilometer-central-agent" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.149133 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="sg-core" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.149156 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="ceilometer-notification-agent" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.149169 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="proxy-httpd" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.149181 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" containerName="ceilometer-central-agent" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.150949 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.153583 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.153908 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.162135 4948 scope.go:117] "RemoveContainer" containerID="5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.182823 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.225829 4948 scope.go:117] "RemoveContainer" containerID="a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.252435 4948 scope.go:117] "RemoveContainer" containerID="4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7" Jan 20 20:08:29 crc kubenswrapper[4948]: E0120 20:08:29.256649 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7\": container with ID starting with 4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7 not found: ID does not exist" containerID="4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.256712 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7"} err="failed to get container status \"4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7\": rpc error: code = NotFound desc = could not find container \"4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7\": container with ID starting with 4026866176eefc02a961bc337759faf9c8e12914b92722a0641c89276754b3e7 not found: ID does not exist" 
Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.256746 4948 scope.go:117] "RemoveContainer" containerID="1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5" Jan 20 20:08:29 crc kubenswrapper[4948]: E0120 20:08:29.257140 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5\": container with ID starting with 1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5 not found: ID does not exist" containerID="1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.257158 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5"} err="failed to get container status \"1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5\": rpc error: code = NotFound desc = could not find container \"1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5\": container with ID starting with 1ddfd1937bd7042bd4475af091f8f6283607e29941536307accf8e055d8fcbb5 not found: ID does not exist" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.257171 4948 scope.go:117] "RemoveContainer" containerID="5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49" Jan 20 20:08:29 crc kubenswrapper[4948]: E0120 20:08:29.257419 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49\": container with ID starting with 5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49 not found: ID does not exist" containerID="5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.257450 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49"} err="failed to get container status \"5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49\": rpc error: code = NotFound desc = could not find container \"5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49\": container with ID starting with 5a5fdbf197e0227af3e60415551e122a7024b1eac0524e8ff521c64fed8b3a49 not found: ID does not exist" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.257473 4948 scope.go:117] "RemoveContainer" containerID="a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0" Jan 20 20:08:29 crc kubenswrapper[4948]: E0120 20:08:29.257750 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0\": container with ID starting with a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0 not found: ID does not exist" containerID="a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.257770 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0"} err="failed to get container status \"a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0\": rpc error: code = NotFound desc = could not find container 
\"a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0\": container with ID starting with a6c5dd85c7d9f1f974a0c6f099fec59206c2b5a8ad5f06e18daaa52fc3390ef0 not found: ID does not exist" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.269294 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-config-data\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.269344 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-scripts\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.269399 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-log-httpd\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.269417 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk4xw\" (UniqueName: \"kubernetes.io/projected/cfc2c00b-c795-4f6d-a945-f20dabe04331-kube-api-access-zk4xw\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.269452 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.269482 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-run-httpd\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.269513 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.371812 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-log-httpd\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.371856 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk4xw\" (UniqueName: \"kubernetes.io/projected/cfc2c00b-c795-4f6d-a945-f20dabe04331-kube-api-access-zk4xw\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc 
kubenswrapper[4948]: I0120 20:08:29.371903 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.371939 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-run-httpd\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.371985 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.372064 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-config-data\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.372098 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-scripts\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.376064 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-run-httpd\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.376391 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-log-httpd\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.378557 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-scripts\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.379846 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.381555 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-config-data\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.405697 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-zk4xw\" (UniqueName: \"kubernetes.io/projected/cfc2c00b-c795-4f6d-a945-f20dabe04331-kube-api-access-zk4xw\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.406670 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.411345 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.411835 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="e7ede84b-9ae0-49a5-a694-acacdd4c1b95" containerName="kube-state-metrics" containerID="cri-o://4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e" gracePeriod=30 Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.474067 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.857947 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.868975 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:08:29 crc kubenswrapper[4948]: I0120 20:08:29.962003 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.058980 4948 generic.go:334] "Generic (PLEG): container finished" podID="e7ede84b-9ae0-49a5-a694-acacdd4c1b95" containerID="4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e" exitCode=2 Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.059115 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.059986 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e7ede84b-9ae0-49a5-a694-acacdd4c1b95","Type":"ContainerDied","Data":"4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e"} Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.060030 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"e7ede84b-9ae0-49a5-a694-acacdd4c1b95","Type":"ContainerDied","Data":"8b8cb564068b7ecf0abf7b2a4334218fd50ef77c8124f5b0cc9815c61cfeef7e"} Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.060052 4948 scope.go:117] "RemoveContainer" containerID="4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.062131 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerStarted","Data":"c8c892f458932ff0ff1099e27ece160d6e462b00859dd40f5d102bdfde631e99"} Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.087240 4948 scope.go:117] "RemoveContainer" containerID="4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e" Jan 20 20:08:30 crc kubenswrapper[4948]: E0120 20:08:30.088413 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e\": container with ID starting with 4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e not found: ID does not exist" containerID="4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.088454 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e"} err="failed to get container status \"4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e\": rpc error: code = NotFound desc = could not find container \"4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e\": container with ID starting with 4feb0c91af3bd643d22be9ba93e42466e9b636dbef998700799d40146f217a5e not found: ID does not exist" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.094984 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdf85\" (UniqueName: \"kubernetes.io/projected/e7ede84b-9ae0-49a5-a694-acacdd4c1b95-kube-api-access-qdf85\") pod \"e7ede84b-9ae0-49a5-a694-acacdd4c1b95\" (UID: \"e7ede84b-9ae0-49a5-a694-acacdd4c1b95\") " Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.106523 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7ede84b-9ae0-49a5-a694-acacdd4c1b95-kube-api-access-qdf85" (OuterVolumeSpecName: "kube-api-access-qdf85") pod "e7ede84b-9ae0-49a5-a694-acacdd4c1b95" (UID: "e7ede84b-9ae0-49a5-a694-acacdd4c1b95"). InnerVolumeSpecName "kube-api-access-qdf85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.197152 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdf85\" (UniqueName: \"kubernetes.io/projected/e7ede84b-9ae0-49a5-a694-acacdd4c1b95-kube-api-access-qdf85\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.390221 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.398537 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.415882 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:08:30 crc kubenswrapper[4948]: E0120 20:08:30.416258 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7ede84b-9ae0-49a5-a694-acacdd4c1b95" containerName="kube-state-metrics" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.416274 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7ede84b-9ae0-49a5-a694-acacdd4c1b95" containerName="kube-state-metrics" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.416462 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7ede84b-9ae0-49a5-a694-acacdd4c1b95" containerName="kube-state-metrics" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.417201 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.421037 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.422242 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.435377 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.502942 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.503018 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.503043 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.503119 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qglj\" (UniqueName: 
\"kubernetes.io/projected/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-api-access-7qglj\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.583194 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f0fe21e-39ad-4b67-a735-43c5c67d99fc" path="/var/lib/kubelet/pods/7f0fe21e-39ad-4b67-a735-43c5c67d99fc/volumes" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.584371 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7ede84b-9ae0-49a5-a694-acacdd4c1b95" path="/var/lib/kubelet/pods/e7ede84b-9ae0-49a5-a694-acacdd4c1b95/volumes" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.604977 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qglj\" (UniqueName: \"kubernetes.io/projected/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-api-access-7qglj\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.605120 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.605178 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.605207 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.610091 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.610358 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.610431 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.628085 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7qglj\" (UniqueName: \"kubernetes.io/projected/3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f-kube-api-access-7qglj\") pod \"kube-state-metrics-0\" (UID: \"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f\") " pod="openstack/kube-state-metrics-0" Jan 20 20:08:30 crc kubenswrapper[4948]: I0120 20:08:30.879517 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 20 20:08:31 crc kubenswrapper[4948]: I0120 20:08:31.100673 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerStarted","Data":"cd0e909016e7f04f370678e426b62b00d205ca67a769fbaa069ccb10f99450d1"} Jan 20 20:08:31 crc kubenswrapper[4948]: I0120 20:08:31.416497 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 20 20:08:32 crc kubenswrapper[4948]: I0120 20:08:32.129819 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerStarted","Data":"6a5447c20ff74e70bb52a494ff1cc4759dffb0162bb39065e864a95aaa2ce6e8"} Jan 20 20:08:32 crc kubenswrapper[4948]: I0120 20:08:32.142543 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f","Type":"ContainerStarted","Data":"5f0c34c8f7c88d7655c7a5a4673c88b414948d7efa2cdb3ad48bb36d3d6efd5d"} Jan 20 20:08:32 crc kubenswrapper[4948]: I0120 20:08:32.142590 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f","Type":"ContainerStarted","Data":"b1832509477572f8440ea31ae63ac4536b07f08750956718a1871e79a2ca8e6d"} Jan 20 20:08:32 crc kubenswrapper[4948]: I0120 20:08:32.143088 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 20 20:08:32 crc kubenswrapper[4948]: I0120 20:08:32.173019 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.806218507 podStartE2EDuration="2.172998119s" podCreationTimestamp="2026-01-20 20:08:30 +0000 UTC" firstStartedPulling="2026-01-20 20:08:31.413759149 +0000 UTC m=+1139.364484118" lastFinishedPulling="2026-01-20 20:08:31.780538761 +0000 UTC m=+1139.731263730" observedRunningTime="2026-01-20 20:08:32.163633994 +0000 UTC m=+1140.114358963" watchObservedRunningTime="2026-01-20 20:08:32.172998119 +0000 UTC m=+1140.123723098" Jan 20 20:08:32 crc kubenswrapper[4948]: I0120 20:08:32.285668 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:33 crc kubenswrapper[4948]: I0120 20:08:33.156259 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerStarted","Data":"744ebd36e85dbf2299623b8c317af5c21d452323ae8605ede2db6aeaa9abdb94"} Jan 20 20:08:35 crc kubenswrapper[4948]: I0120 20:08:35.182737 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerStarted","Data":"114c1785788d0dff79275639fc31bc9860fe7381763237c901bfa5bc46a11383"} Jan 20 20:08:35 crc kubenswrapper[4948]: I0120 20:08:35.183171 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 20:08:35 crc kubenswrapper[4948]: I0120 20:08:35.182947 4948 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="sg-core" containerID="cri-o://744ebd36e85dbf2299623b8c317af5c21d452323ae8605ede2db6aeaa9abdb94" gracePeriod=30 Jan 20 20:08:35 crc kubenswrapper[4948]: I0120 20:08:35.182923 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="proxy-httpd" containerID="cri-o://114c1785788d0dff79275639fc31bc9860fe7381763237c901bfa5bc46a11383" gracePeriod=30 Jan 20 20:08:35 crc kubenswrapper[4948]: I0120 20:08:35.182981 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="ceilometer-central-agent" containerID="cri-o://cd0e909016e7f04f370678e426b62b00d205ca67a769fbaa069ccb10f99450d1" gracePeriod=30 Jan 20 20:08:35 crc kubenswrapper[4948]: I0120 20:08:35.182968 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="ceilometer-notification-agent" containerID="cri-o://6a5447c20ff74e70bb52a494ff1cc4759dffb0162bb39065e864a95aaa2ce6e8" gracePeriod=30 Jan 20 20:08:35 crc kubenswrapper[4948]: I0120 20:08:35.544275 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.529305467 podStartE2EDuration="6.54424627s" podCreationTimestamp="2026-01-20 20:08:29 +0000 UTC" firstStartedPulling="2026-01-20 20:08:29.868524063 +0000 UTC m=+1137.819249032" lastFinishedPulling="2026-01-20 20:08:33.883464866 +0000 UTC m=+1141.834189835" observedRunningTime="2026-01-20 20:08:35.540836583 +0000 UTC m=+1143.491561552" watchObservedRunningTime="2026-01-20 20:08:35.54424627 +0000 UTC m=+1143.494971239" Jan 20 20:08:35 crc kubenswrapper[4948]: E0120 20:08:35.702030 4948 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcfc2c00b_c795_4f6d_a945_f20dabe04331.slice/crio-744ebd36e85dbf2299623b8c317af5c21d452323ae8605ede2db6aeaa9abdb94.scope\": RecentStats: unable to find data in memory cache]" Jan 20 20:08:36 crc kubenswrapper[4948]: I0120 20:08:36.195249 4948 generic.go:334] "Generic (PLEG): container finished" podID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerID="114c1785788d0dff79275639fc31bc9860fe7381763237c901bfa5bc46a11383" exitCode=0 Jan 20 20:08:36 crc kubenswrapper[4948]: I0120 20:08:36.195294 4948 generic.go:334] "Generic (PLEG): container finished" podID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerID="744ebd36e85dbf2299623b8c317af5c21d452323ae8605ede2db6aeaa9abdb94" exitCode=2 Jan 20 20:08:36 crc kubenswrapper[4948]: I0120 20:08:36.195304 4948 generic.go:334] "Generic (PLEG): container finished" podID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerID="6a5447c20ff74e70bb52a494ff1cc4759dffb0162bb39065e864a95aaa2ce6e8" exitCode=0 Jan 20 20:08:36 crc kubenswrapper[4948]: I0120 20:08:36.195307 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerDied","Data":"114c1785788d0dff79275639fc31bc9860fe7381763237c901bfa5bc46a11383"} Jan 20 20:08:36 crc kubenswrapper[4948]: I0120 20:08:36.195354 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerDied","Data":"744ebd36e85dbf2299623b8c317af5c21d452323ae8605ede2db6aeaa9abdb94"} Jan 20 20:08:36 crc kubenswrapper[4948]: I0120 20:08:36.195365 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerDied","Data":"6a5447c20ff74e70bb52a494ff1cc4759dffb0162bb39065e864a95aaa2ce6e8"} Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.249551 4948 generic.go:334] "Generic (PLEG): container finished" podID="4d2c0905-915e-4504-8454-ee3500220ab3" containerID="3a23ab38989e7c7f201254011c0807c65fcca348eb7fda45253cf536df81d13d" exitCode=137 Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.249835 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dd67cb9b-9w4wk" event={"ID":"4d2c0905-915e-4504-8454-ee3500220ab3","Type":"ContainerDied","Data":"3a23ab38989e7c7f201254011c0807c65fcca348eb7fda45253cf536df81d13d"} Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.250108 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dd67cb9b-9w4wk" event={"ID":"4d2c0905-915e-4504-8454-ee3500220ab3","Type":"ContainerStarted","Data":"87f4c3b2c6dd557e6ef560a203b577eeda11064eb3ebfbe7c882772cb8bc9629"} Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.250131 4948 scope.go:117] "RemoveContainer" containerID="08d9c3660e3ecd0832afba6cf5911a8e8427e7bed01955d0e134ac074a19a3f1" Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.256747 4948 generic.go:334] "Generic (PLEG): container finished" podID="af522f17-3cad-4004-b112-51e47fa9fea7" containerID="f5337fdeea822defb3bda066c6a194da1d66af7fc4c86187fb510469631f72ad" exitCode=137 Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.256855 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerDied","Data":"f5337fdeea822defb3bda066c6a194da1d66af7fc4c86187fb510469631f72ad"} Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.256932 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerStarted","Data":"eb250b4b5dbae1e0a758f7d341fc5c9464138bb0ec515d14abc4b1571a5d19f5"} Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.491045 4948 scope.go:117] "RemoveContainer" containerID="3d0b58f79a4101a472c79a9066f937e017f54113f2910aa3d332331e863ecd0f" Jan 20 20:08:40 crc kubenswrapper[4948]: I0120 20:08:40.895572 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 20 20:08:41 crc kubenswrapper[4948]: I0120 20:08:41.274605 4948 generic.go:334] "Generic (PLEG): container finished" podID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerID="cd0e909016e7f04f370678e426b62b00d205ca67a769fbaa069ccb10f99450d1" exitCode=0 Jan 20 20:08:41 crc kubenswrapper[4948]: I0120 20:08:41.274679 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerDied","Data":"cd0e909016e7f04f370678e426b62b00d205ca67a769fbaa069ccb10f99450d1"} Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.110998 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253155 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-log-httpd\") pod \"cfc2c00b-c795-4f6d-a945-f20dabe04331\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253554 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-run-httpd\") pod \"cfc2c00b-c795-4f6d-a945-f20dabe04331\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253606 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cfc2c00b-c795-4f6d-a945-f20dabe04331" (UID: "cfc2c00b-c795-4f6d-a945-f20dabe04331"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253630 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-scripts\") pod \"cfc2c00b-c795-4f6d-a945-f20dabe04331\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253739 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-sg-core-conf-yaml\") pod \"cfc2c00b-c795-4f6d-a945-f20dabe04331\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253766 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-combined-ca-bundle\") pod \"cfc2c00b-c795-4f6d-a945-f20dabe04331\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253812 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zk4xw\" (UniqueName: \"kubernetes.io/projected/cfc2c00b-c795-4f6d-a945-f20dabe04331-kube-api-access-zk4xw\") pod \"cfc2c00b-c795-4f6d-a945-f20dabe04331\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253909 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-config-data\") pod \"cfc2c00b-c795-4f6d-a945-f20dabe04331\" (UID: \"cfc2c00b-c795-4f6d-a945-f20dabe04331\") " Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.253956 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cfc2c00b-c795-4f6d-a945-f20dabe04331" (UID: "cfc2c00b-c795-4f6d-a945-f20dabe04331"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.254353 4948 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.254370 4948 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cfc2c00b-c795-4f6d-a945-f20dabe04331-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.269258 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-scripts" (OuterVolumeSpecName: "scripts") pod "cfc2c00b-c795-4f6d-a945-f20dabe04331" (UID: "cfc2c00b-c795-4f6d-a945-f20dabe04331"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.273859 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfc2c00b-c795-4f6d-a945-f20dabe04331-kube-api-access-zk4xw" (OuterVolumeSpecName: "kube-api-access-zk4xw") pod "cfc2c00b-c795-4f6d-a945-f20dabe04331" (UID: "cfc2c00b-c795-4f6d-a945-f20dabe04331"). InnerVolumeSpecName "kube-api-access-zk4xw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.300783 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-xpn28" event={"ID":"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844","Type":"ContainerStarted","Data":"eae9735274d1023e219135a04831bdb15fd72c95cdabbd5a07697e6e6c1a4d16"} Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.330456 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cfc2c00b-c795-4f6d-a945-f20dabe04331","Type":"ContainerDied","Data":"c8c892f458932ff0ff1099e27ece160d6e462b00859dd40f5d102bdfde631e99"} Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.330739 4948 scope.go:117] "RemoveContainer" containerID="114c1785788d0dff79275639fc31bc9860fe7381763237c901bfa5bc46a11383" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.331217 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.337597 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-xpn28" podStartSLOduration=2.540441746 podStartE2EDuration="38.33757808s" podCreationTimestamp="2026-01-20 20:08:04 +0000 UTC" firstStartedPulling="2026-01-20 20:08:05.479292579 +0000 UTC m=+1113.430017548" lastFinishedPulling="2026-01-20 20:08:41.276428913 +0000 UTC m=+1149.227153882" observedRunningTime="2026-01-20 20:08:42.328507733 +0000 UTC m=+1150.279232722" watchObservedRunningTime="2026-01-20 20:08:42.33757808 +0000 UTC m=+1150.288303049" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.340776 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cfc2c00b-c795-4f6d-a945-f20dabe04331" (UID: "cfc2c00b-c795-4f6d-a945-f20dabe04331"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.356818 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.356856 4948 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.356866 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zk4xw\" (UniqueName: \"kubernetes.io/projected/cfc2c00b-c795-4f6d-a945-f20dabe04331-kube-api-access-zk4xw\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.388502 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cfc2c00b-c795-4f6d-a945-f20dabe04331" (UID: "cfc2c00b-c795-4f6d-a945-f20dabe04331"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.411498 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-config-data" (OuterVolumeSpecName: "config-data") pod "cfc2c00b-c795-4f6d-a945-f20dabe04331" (UID: "cfc2c00b-c795-4f6d-a945-f20dabe04331"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.458329 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.458381 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cfc2c00b-c795-4f6d-a945-f20dabe04331-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.491643 4948 scope.go:117] "RemoveContainer" containerID="744ebd36e85dbf2299623b8c317af5c21d452323ae8605ede2db6aeaa9abdb94" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.513646 4948 scope.go:117] "RemoveContainer" containerID="6a5447c20ff74e70bb52a494ff1cc4759dffb0162bb39065e864a95aaa2ce6e8" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.534248 4948 scope.go:117] "RemoveContainer" containerID="cd0e909016e7f04f370678e426b62b00d205ca67a769fbaa069ccb10f99450d1" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.662871 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.672074 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.685987 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:08:42 crc kubenswrapper[4948]: E0120 20:08:42.686438 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="sg-core" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.686464 4948 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="sg-core" Jan 20 20:08:42 crc kubenswrapper[4948]: E0120 20:08:42.686483 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="proxy-httpd" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.686491 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="proxy-httpd" Jan 20 20:08:42 crc kubenswrapper[4948]: E0120 20:08:42.686509 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="ceilometer-central-agent" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.686518 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="ceilometer-central-agent" Jan 20 20:08:42 crc kubenswrapper[4948]: E0120 20:08:42.686551 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="ceilometer-notification-agent" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.686560 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="ceilometer-notification-agent" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.686795 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="proxy-httpd" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.686819 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="sg-core" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.686839 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="ceilometer-notification-agent" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.686856 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" containerName="ceilometer-central-agent" Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.691759 4948 util.go:30] "No sandbox for pod can be found. 
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.705248 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.709694 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.709809 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.710058 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.764050 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-scripts\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.764121 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.764183 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-config-data\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.764206 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.764229 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flstb\" (UniqueName: \"kubernetes.io/projected/b375751a-1794-4942-9f54-3c726c645fc1-kube-api-access-flstb\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.764262 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-log-httpd\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.764305 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-run-httpd\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.764335 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.865162 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.865214 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-scripts\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.865254 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.865300 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-config-data\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.865317 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.865346 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flstb\" (UniqueName: \"kubernetes.io/projected/b375751a-1794-4942-9f54-3c726c645fc1-kube-api-access-flstb\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.865385 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-log-httpd\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.865426 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-run-httpd\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.866346 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-log-httpd\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.866407 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-run-httpd\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.868321 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.869951 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-scripts\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.870302 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.873384 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.873634 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-config-data\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:42 crc kubenswrapper[4948]: I0120 20:08:42.884552 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flstb\" (UniqueName: \"kubernetes.io/projected/b375751a-1794-4942-9f54-3c726c645fc1-kube-api-access-flstb\") pod \"ceilometer-0\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " pod="openstack/ceilometer-0"
Jan 20 20:08:43 crc kubenswrapper[4948]: I0120 20:08:43.009260 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 20 20:08:43 crc kubenswrapper[4948]: I0120 20:08:43.719236 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:08:43 crc kubenswrapper[4948]: W0120 20:08:43.724972 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb375751a_1794_4942_9f54_3c726c645fc1.slice/crio-7080a064e5203eb7d2e39ff4777854c5fc015adeb358822cd6e424034599587b WatchSource:0}: Error finding container 7080a064e5203eb7d2e39ff4777854c5fc015adeb358822cd6e424034599587b: Status 404 returned error can't find the container with id 7080a064e5203eb7d2e39ff4777854c5fc015adeb358822cd6e424034599587b
Jan 20 20:08:44 crc kubenswrapper[4948]: I0120 20:08:44.370463 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerStarted","Data":"7080a064e5203eb7d2e39ff4777854c5fc015adeb358822cd6e424034599587b"}
Jan 20 20:08:44 crc kubenswrapper[4948]: I0120 20:08:44.581944 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfc2c00b-c795-4f6d-a945-f20dabe04331" path="/var/lib/kubelet/pods/cfc2c00b-c795-4f6d-a945-f20dabe04331/volumes"
Jan 20 20:08:45 crc kubenswrapper[4948]: I0120 20:08:45.477660 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerStarted","Data":"8d3efb295606be939b1ac9a2e88becffa71fc77ad93e7d978336fe7b9a593217"}
Jan 20 20:08:46 crc kubenswrapper[4948]: I0120 20:08:46.496102 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerStarted","Data":"eb61bc7305f40bab6fbc3688c1490358a8edee5adb11d0222d64c87d01a289f3"}
Jan 20 20:08:46 crc kubenswrapper[4948]: I0120 20:08:46.496726 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerStarted","Data":"8f0b57742ecbd83c94f28c788bc9ab3881a4e7a11f2bb5c79770f326266309b5"}
Jan 20 20:08:47 crc kubenswrapper[4948]: I0120 20:08:47.988188 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:08:48 crc kubenswrapper[4948]: I0120 20:08:48.521675 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerStarted","Data":"a50e2f9ac506d2fa3191bebb8c673eaca742b5ee52d5d9c491ee0b0052cfe37f"}
Jan 20 20:08:48 crc kubenswrapper[4948]: I0120 20:08:48.521979 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 20 20:08:49 crc kubenswrapper[4948]: I0120 20:08:49.393917 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67dd67cb9b-9w4wk"
Jan 20 20:08:49 crc kubenswrapper[4948]: I0120 20:08:49.393997 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-67dd67cb9b-9w4wk"
Jan 20 20:08:49 crc kubenswrapper[4948]: I0120 20:08:49.556571 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-68bc7c4fc6-4mkmv"
Jan 20 20:08:49 crc kubenswrapper[4948]: I0120 20:08:49.557149 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-68bc7c4fc6-4mkmv"
Jan 20 20:08:49 crc kubenswrapper[4948]: I0120 20:08:49.563585 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="ceilometer-central-agent" containerID="cri-o://8d3efb295606be939b1ac9a2e88becffa71fc77ad93e7d978336fe7b9a593217" gracePeriod=30
Jan 20 20:08:49 crc kubenswrapper[4948]: I0120 20:08:49.564097 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="proxy-httpd" containerID="cri-o://a50e2f9ac506d2fa3191bebb8c673eaca742b5ee52d5d9c491ee0b0052cfe37f" gracePeriod=30
Jan 20 20:08:49 crc kubenswrapper[4948]: I0120 20:08:49.564124 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="ceilometer-notification-agent" containerID="cri-o://8f0b57742ecbd83c94f28c788bc9ab3881a4e7a11f2bb5c79770f326266309b5" gracePeriod=30
Jan 20 20:08:49 crc kubenswrapper[4948]: I0120 20:08:49.564117 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="sg-core" containerID="cri-o://eb61bc7305f40bab6fbc3688c1490358a8edee5adb11d0222d64c87d01a289f3" gracePeriod=30
Jan 20 20:08:50 crc kubenswrapper[4948]: I0120 20:08:50.577578 4948 generic.go:334] "Generic (PLEG): container finished" podID="b375751a-1794-4942-9f54-3c726c645fc1" containerID="a50e2f9ac506d2fa3191bebb8c673eaca742b5ee52d5d9c491ee0b0052cfe37f" exitCode=0
Jan 20 20:08:50 crc kubenswrapper[4948]: I0120 20:08:50.578154 4948 generic.go:334] "Generic (PLEG): container finished" podID="b375751a-1794-4942-9f54-3c726c645fc1" containerID="eb61bc7305f40bab6fbc3688c1490358a8edee5adb11d0222d64c87d01a289f3" exitCode=2
Jan 20 20:08:50 crc kubenswrapper[4948]: I0120 20:08:50.578227 4948 generic.go:334] "Generic (PLEG): container finished" podID="b375751a-1794-4942-9f54-3c726c645fc1" containerID="8f0b57742ecbd83c94f28c788bc9ab3881a4e7a11f2bb5c79770f326266309b5" exitCode=0
Jan 20 20:08:50 crc kubenswrapper[4948]: I0120 20:08:50.589922 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerDied","Data":"a50e2f9ac506d2fa3191bebb8c673eaca742b5ee52d5d9c491ee0b0052cfe37f"}
Jan 20 20:08:50 crc kubenswrapper[4948]: I0120 20:08:50.589983 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerDied","Data":"eb61bc7305f40bab6fbc3688c1490358a8edee5adb11d0222d64c87d01a289f3"}
Jan 20 20:08:50 crc kubenswrapper[4948]: I0120 20:08:50.590005 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerDied","Data":"8f0b57742ecbd83c94f28c788bc9ab3881a4e7a11f2bb5c79770f326266309b5"}
Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.637159 4948 generic.go:334] "Generic (PLEG): container finished" podID="b375751a-1794-4942-9f54-3c726c645fc1" containerID="8d3efb295606be939b1ac9a2e88becffa71fc77ad93e7d978336fe7b9a593217" exitCode=0
Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.637384 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerDied","Data":"8d3efb295606be939b1ac9a2e88becffa71fc77ad93e7d978336fe7b9a593217"}
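This is the normal graceful shutdown path: the API DELETE at 20:08:47.988188 triggers one "Killing container with a grace period" record per container (gracePeriod=30), each later matched by a "container finished" record carrying the exit code (sg-core exits 2 here, the other three exit 0) and a PLEG ContainerDied event. A sketch (hypothetical helper, same record tuples as above) joining kills to their observed exit codes by cri-o container ID:

```python
import re

def kill_outcomes(records):
    """Join 'Killing container with a grace period' records to the matching
    'container finished' exitCode, keyed by cri-o container ID."""
    killed, exits = {}, {}
    for _sev, time, _src, rest in records:
        m = re.search(r'containerName="([^"]+)" containerID="cri-o://([0-9a-f]+)"', rest)
        if 'Killing container with a grace period' in rest and m:
            killed[m.group(2)] = (m.group(1), time)
        m2 = re.search(r'containerID="([0-9a-f]+)" exitCode=(-?\d+)', rest)
        if 'container finished' in rest and m2:
            exits[m2.group(1)] = int(m2.group(2))
    return {cid: (name, t, exits.get(cid)) for cid, (name, t) in killed.items()}

# e.g. {'eb61bc73...': ('sg-core', '20:08:49.564117', 2), '8d3efb29...': ('ceilometer-central-agent', '20:08:49.563585', 0), ...}
```

A container whose exit code stays None past its grace period would be the one that needed SIGKILL; none of the four here does.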
event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerDied","Data":"8d3efb295606be939b1ac9a2e88becffa71fc77ad93e7d978336fe7b9a593217"} Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.829008 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.977854 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-combined-ca-bundle\") pod \"b375751a-1794-4942-9f54-3c726c645fc1\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.977898 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-scripts\") pod \"b375751a-1794-4942-9f54-3c726c645fc1\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.977970 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flstb\" (UniqueName: \"kubernetes.io/projected/b375751a-1794-4942-9f54-3c726c645fc1-kube-api-access-flstb\") pod \"b375751a-1794-4942-9f54-3c726c645fc1\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.978018 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-log-httpd\") pod \"b375751a-1794-4942-9f54-3c726c645fc1\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.978084 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-ceilometer-tls-certs\") pod \"b375751a-1794-4942-9f54-3c726c645fc1\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.978178 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-run-httpd\") pod \"b375751a-1794-4942-9f54-3c726c645fc1\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.978250 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-config-data\") pod \"b375751a-1794-4942-9f54-3c726c645fc1\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.978279 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-sg-core-conf-yaml\") pod \"b375751a-1794-4942-9f54-3c726c645fc1\" (UID: \"b375751a-1794-4942-9f54-3c726c645fc1\") " Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.978484 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b375751a-1794-4942-9f54-3c726c645fc1" (UID: "b375751a-1794-4942-9f54-3c726c645fc1"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.978838 4948 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.978987 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b375751a-1794-4942-9f54-3c726c645fc1" (UID: "b375751a-1794-4942-9f54-3c726c645fc1"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:08:54 crc kubenswrapper[4948]: I0120 20:08:54.998271 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-scripts" (OuterVolumeSpecName: "scripts") pod "b375751a-1794-4942-9f54-3c726c645fc1" (UID: "b375751a-1794-4942-9f54-3c726c645fc1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.003970 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b375751a-1794-4942-9f54-3c726c645fc1-kube-api-access-flstb" (OuterVolumeSpecName: "kube-api-access-flstb") pod "b375751a-1794-4942-9f54-3c726c645fc1" (UID: "b375751a-1794-4942-9f54-3c726c645fc1"). InnerVolumeSpecName "kube-api-access-flstb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.050888 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b375751a-1794-4942-9f54-3c726c645fc1" (UID: "b375751a-1794-4942-9f54-3c726c645fc1"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.080950 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.080981 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flstb\" (UniqueName: \"kubernetes.io/projected/b375751a-1794-4942-9f54-3c726c645fc1-kube-api-access-flstb\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.080992 4948 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b375751a-1794-4942-9f54-3c726c645fc1-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.081001 4948 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.088337 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "b375751a-1794-4942-9f54-3c726c645fc1" (UID: "b375751a-1794-4942-9f54-3c726c645fc1"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.114545 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b375751a-1794-4942-9f54-3c726c645fc1" (UID: "b375751a-1794-4942-9f54-3c726c645fc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.119671 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-config-data" (OuterVolumeSpecName: "config-data") pod "b375751a-1794-4942-9f54-3c726c645fc1" (UID: "b375751a-1794-4942-9f54-3c726c645fc1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.183187 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.183215 4948 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.183224 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b375751a-1794-4942-9f54-3c726c645fc1-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.651696 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b375751a-1794-4942-9f54-3c726c645fc1","Type":"ContainerDied","Data":"7080a064e5203eb7d2e39ff4777854c5fc015adeb358822cd6e424034599587b"} Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.651773 4948 scope.go:117] "RemoveContainer" containerID="a50e2f9ac506d2fa3191bebb8c673eaca742b5ee52d5d9c491ee0b0052cfe37f" Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.651783 4948 util.go:48] "No ready sandbox for pod can be found. 
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.662664 4948 generic.go:334] "Generic (PLEG): container finished" podID="b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" containerID="eae9735274d1023e219135a04831bdb15fd72c95cdabbd5a07697e6e6c1a4d16" exitCode=0
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.662727 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-xpn28" event={"ID":"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844","Type":"ContainerDied","Data":"eae9735274d1023e219135a04831bdb15fd72c95cdabbd5a07697e6e6c1a4d16"}
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.693542 4948 scope.go:117] "RemoveContainer" containerID="eb61bc7305f40bab6fbc3688c1490358a8edee5adb11d0222d64c87d01a289f3"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.784490 4948 scope.go:117] "RemoveContainer" containerID="8f0b57742ecbd83c94f28c788bc9ab3881a4e7a11f2bb5c79770f326266309b5"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.792696 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.821858 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.883841 4948 scope.go:117] "RemoveContainer" containerID="8d3efb295606be939b1ac9a2e88becffa71fc77ad93e7d978336fe7b9a593217"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885087 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:08:55 crc kubenswrapper[4948]: E0120 20:08:55.885580 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="ceilometer-central-agent"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885604 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="ceilometer-central-agent"
Jan 20 20:08:55 crc kubenswrapper[4948]: E0120 20:08:55.885623 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="ceilometer-notification-agent"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885631 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="ceilometer-notification-agent"
Jan 20 20:08:55 crc kubenswrapper[4948]: E0120 20:08:55.885642 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="sg-core"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885650 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="sg-core"
Jan 20 20:08:55 crc kubenswrapper[4948]: E0120 20:08:55.885673 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="proxy-httpd"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885681 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="proxy-httpd"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885935 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="sg-core"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885953 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="proxy-httpd"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885969 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="ceilometer-notification-agent"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.885987 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b375751a-1794-4942-9f54-3c726c645fc1" containerName="ceilometer-central-agent"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.888441 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.894291 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.896282 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.904014 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 20 20:08:55 crc kubenswrapper[4948]: I0120 20:08:55.904198 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.000683 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.000750 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zv22x\" (UniqueName: \"kubernetes.io/projected/498c1699-0031-4363-8686-5f5cdf52c7b2-kube-api-access-zv22x\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.000810 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-log-httpd\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.000977 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.001038 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-run-httpd\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.001166 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-scripts\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.001209 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.001271 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-config-data\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.103123 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.103757 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zv22x\" (UniqueName: \"kubernetes.io/projected/498c1699-0031-4363-8686-5f5cdf52c7b2-kube-api-access-zv22x\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.105272 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-log-httpd\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.105328 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-log-httpd\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.105561 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.106275 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-run-httpd\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.106692 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-scripts\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.106881 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-run-httpd\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.108252 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.108377 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-config-data\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.112377 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.116072 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-config-data\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.120166 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zv22x\" (UniqueName: \"kubernetes.io/projected/498c1699-0031-4363-8686-5f5cdf52c7b2-kube-api-access-zv22x\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.120507 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.120995 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.124318 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-scripts\") pod \"ceilometer-0\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.207499 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.581692 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b375751a-1794-4942-9f54-3c726c645fc1" path="/var/lib/kubelet/pods/b375751a-1794-4942-9f54-3c726c645fc1/volumes"
Jan 20 20:08:56 crc kubenswrapper[4948]: I0120 20:08:56.726995 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.108402 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-xpn28"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.146396 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6tzz\" (UniqueName: \"kubernetes.io/projected/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-kube-api-access-q6tzz\") pod \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") "
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.146536 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-combined-ca-bundle\") pod \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") "
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.146583 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-scripts\") pod \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") "
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.146797 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-config-data\") pod \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\" (UID: \"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844\") "
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.156956 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-kube-api-access-q6tzz" (OuterVolumeSpecName: "kube-api-access-q6tzz") pod "b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" (UID: "b6bba308-c57f-4e3a-a2d8-1efb3f1d1844"). InnerVolumeSpecName "kube-api-access-q6tzz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.184782 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-scripts" (OuterVolumeSpecName: "scripts") pod "b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" (UID: "b6bba308-c57f-4e3a-a2d8-1efb3f1d1844"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.190729 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" (UID: "b6bba308-c57f-4e3a-a2d8-1efb3f1d1844"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.206081 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-config-data" (OuterVolumeSpecName: "config-data") pod "b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" (UID: "b6bba308-c57f-4e3a-a2d8-1efb3f1d1844"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.249502 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.249552 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6tzz\" (UniqueName: \"kubernetes.io/projected/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-kube-api-access-q6tzz\") on node \"crc\" DevicePath \"\""
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.249570 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.249583 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844-scripts\") on node \"crc\" DevicePath \"\""
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.681728 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerStarted","Data":"f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a"}
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.682028 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerStarted","Data":"525ab86992bfd492625ac50eb3b105a4a01757016fcd82d1d0deee3dba13c2c8"}
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.683750 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-xpn28" event={"ID":"b6bba308-c57f-4e3a-a2d8-1efb3f1d1844","Type":"ContainerDied","Data":"22eb83cc604a9a3b2d45cd5762e6e152e09fc1da9904165c3412b0e58c51da5b"}
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.683780 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22eb83cc604a9a3b2d45cd5762e6e152e09fc1da9904165c3412b0e58c51da5b"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.683897 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-xpn28"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.855179 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 20 20:08:57 crc kubenswrapper[4948]: E0120 20:08:57.855678 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" containerName="nova-cell0-conductor-db-sync"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.855698 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" containerName="nova-cell0-conductor-db-sync"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.855978 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" containerName="nova-cell0-conductor-db-sync"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.857911 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.862386 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.862532 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-bgvbx"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.881668 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.967626 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c56770f-e8ae-4540-9bb0-34123665502e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.970342 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c56770f-e8ae-4540-9bb0-34123665502e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:57 crc kubenswrapper[4948]: I0120 20:08:57.970391 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n97qd\" (UniqueName: \"kubernetes.io/projected/8c56770f-e8ae-4540-9bb0-34123665502e-kube-api-access-n97qd\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.072561 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c56770f-e8ae-4540-9bb0-34123665502e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.072981 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c56770f-e8ae-4540-9bb0-34123665502e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.073008 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n97qd\" (UniqueName: \"kubernetes.io/projected/8c56770f-e8ae-4540-9bb0-34123665502e-kube-api-access-n97qd\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.077072 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c56770f-e8ae-4540-9bb0-34123665502e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.083548 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c56770f-e8ae-4540-9bb0-34123665502e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.095202 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n97qd\" (UniqueName: \"kubernetes.io/projected/8c56770f-e8ae-4540-9bb0-34123665502e-kube-api-access-n97qd\") pod \"nova-cell0-conductor-0\" (UID: \"8c56770f-e8ae-4540-9bb0-34123665502e\") " pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.176947 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.709453 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 20 20:08:58 crc kubenswrapper[4948]: I0120 20:08:58.715725 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerStarted","Data":"7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928"}
Jan 20 20:08:59 crc kubenswrapper[4948]: I0120 20:08:59.395919 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused"
Jan 20 20:08:59 crc kubenswrapper[4948]: I0120 20:08:59.541908 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused"
Jan 20 20:08:59 crc kubenswrapper[4948]: I0120 20:08:59.727303 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerStarted","Data":"af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e"}
Jan 20 20:08:59 crc kubenswrapper[4948]: I0120 20:08:59.728621 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8c56770f-e8ae-4540-9bb0-34123665502e","Type":"ContainerStarted","Data":"95370a5db6998f548dd03bfeee185306a805ecdcf0c420763fe6a791e630997a"}
Jan 20 20:08:59 crc kubenswrapper[4948]: I0120 20:08:59.728644 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"8c56770f-e8ae-4540-9bb0-34123665502e","Type":"ContainerStarted","Data":"53826dbb71c8424f6e4375d0aca420135108bf878c2409c1940c9005f4cf56b2"}
Jan 20 20:08:59 crc kubenswrapper[4948]: I0120 20:08:59.729631 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Jan 20 20:09:00 crc kubenswrapper[4948]: I0120 20:09:00.741137 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerStarted","Data":"71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa"}
Jan 20 20:09:00 crc kubenswrapper[4948]: I0120 20:09:00.768746 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.768692063 podStartE2EDuration="3.768692063s" podCreationTimestamp="2026-01-20 20:08:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:08:59.749853452 +0000 UTC m=+1167.700578421" watchObservedRunningTime="2026-01-20 20:09:00.768692063 +0000 UTC m=+1168.719417072"
20:08:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:08:59.749853452 +0000 UTC m=+1167.700578421" watchObservedRunningTime="2026-01-20 20:09:00.768692063 +0000 UTC m=+1168.719417072" Jan 20 20:09:00 crc kubenswrapper[4948]: I0120 20:09:00.783997 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.227689752 podStartE2EDuration="5.783976035s" podCreationTimestamp="2026-01-20 20:08:55 +0000 UTC" firstStartedPulling="2026-01-20 20:08:56.747598996 +0000 UTC m=+1164.698323965" lastFinishedPulling="2026-01-20 20:09:00.303885279 +0000 UTC m=+1168.254610248" observedRunningTime="2026-01-20 20:09:00.76964192 +0000 UTC m=+1168.720366889" watchObservedRunningTime="2026-01-20 20:09:00.783976035 +0000 UTC m=+1168.734701004" Jan 20 20:09:01 crc kubenswrapper[4948]: I0120 20:09:01.749444 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.214659 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.834387 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-rxl64"] Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.835802 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.838841 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.839161 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.853957 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.854005 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk25k\" (UniqueName: \"kubernetes.io/projected/6f3d8a46-101e-416b-b8c7-84c53794528e-kube-api-access-qk25k\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.854096 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-scripts\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.854171 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-config-data\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " 
pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.861174 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-rxl64"] Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.954942 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-config-data\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.955057 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.955089 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk25k\" (UniqueName: \"kubernetes.io/projected/6f3d8a46-101e-416b-b8c7-84c53794528e-kube-api-access-qk25k\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.955174 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-scripts\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.965451 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-scripts\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:08 crc kubenswrapper[4948]: I0120 20:09:08.986120 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.003588 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-config-data\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.011321 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk25k\" (UniqueName: \"kubernetes.io/projected/6f3d8a46-101e-416b-b8c7-84c53794528e-kube-api-access-qk25k\") pod \"nova-cell0-cell-mapping-rxl64\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.089789 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.101185 4948 util.go:30] "No sandbox for pod can be found. 
Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.110109 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.111279 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.112447 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.113969 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.156882 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.157496 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.198155 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.265262 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.265630 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e25e50e7-eae8-4ca6-98d5-c88278e5827e-logs\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.265675 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-config-data\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.265717 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qfrx\" (UniqueName: \"kubernetes.io/projected/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-kube-api-access-2qfrx\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.265781 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.265833 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc
kubenswrapper[4948]: I0120 20:09:09.265888 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h29ns\" (UniqueName: \"kubernetes.io/projected/e25e50e7-eae8-4ca6-98d5-c88278e5827e-kube-api-access-h29ns\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.327521 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.328683 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.338407 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.366732 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.366798 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e25e50e7-eae8-4ca6-98d5-c88278e5827e-logs\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.366833 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-config-data\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.366854 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qfrx\" (UniqueName: \"kubernetes.io/projected/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-kube-api-access-2qfrx\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.366890 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.366924 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.366966 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h29ns\" (UniqueName: \"kubernetes.io/projected/e25e50e7-eae8-4ca6-98d5-c88278e5827e-kube-api-access-h29ns\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.368325 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/e25e50e7-eae8-4ca6-98d5-c88278e5827e-logs\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.408516 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.408625 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-config-data\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.420975 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.421454 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.459680 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.472601 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwjhr\" (UniqueName: \"kubernetes.io/projected/45e577b4-23c3-4979-ba2e-bd07d8d672e8-kube-api-access-nwjhr\") pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.472671 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-config-data\") pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.481162 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h29ns\" (UniqueName: \"kubernetes.io/projected/e25e50e7-eae8-4ca6-98d5-c88278e5827e-kube-api-access-h29ns\") pod \"nova-api-0\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") " pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.481480 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qfrx\" (UniqueName: \"kubernetes.io/projected/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-kube-api-access-2qfrx\") pod \"nova-cell1-novncproxy-0\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.494856 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-combined-ca-bundle\") 
pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.603837 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwjhr\" (UniqueName: \"kubernetes.io/projected/45e577b4-23c3-4979-ba2e-bd07d8d672e8-kube-api-access-nwjhr\") pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.603905 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-config-data\") pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.603961 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.640468 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-config-data\") pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.641315 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.688277 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwjhr\" (UniqueName: \"kubernetes.io/projected/45e577b4-23c3-4979-ba2e-bd07d8d672e8-kube-api-access-nwjhr\") pod \"nova-scheduler-0\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.702953 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.705050 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.716662 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.739839 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.761586 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.770851 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.809838 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.810049 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ba2a684-7bb3-415e-8f36-afcad42f65af-logs\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.810083 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-config-data\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.810135 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r6qt\" (UniqueName: \"kubernetes.io/projected/4ba2a684-7bb3-415e-8f36-afcad42f65af-kube-api-access-8r6qt\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.911920 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ba2a684-7bb3-415e-8f36-afcad42f65af-logs\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.911962 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-config-data\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.912007 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r6qt\" (UniqueName: \"kubernetes.io/projected/4ba2a684-7bb3-415e-8f36-afcad42f65af-kube-api-access-8r6qt\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.912053 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.912388 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ba2a684-7bb3-415e-8f36-afcad42f65af-logs\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: 
I0120 20:09:09.924401 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.926407 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-config-data\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.931394 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-bqnkw"] Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.938108 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.965736 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:09:09 crc kubenswrapper[4948]: I0120 20:09:09.969057 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r6qt\" (UniqueName: \"kubernetes.io/projected/4ba2a684-7bb3-415e-8f36-afcad42f65af-kube-api-access-8r6qt\") pod \"nova-metadata-0\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " pod="openstack/nova-metadata-0" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.013290 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-svc\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.013362 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.013397 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.013433 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmbhx\" (UniqueName: \"kubernetes.io/projected/11a46772-3366-44ee-9479-0be0f0cfaca4-kube-api-access-mmbhx\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.013468 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") 
" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.013499 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-config\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.067677 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.105060 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-bqnkw"] Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.116899 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.116946 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-config\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.117042 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-svc\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.117076 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.117102 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.117133 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmbhx\" (UniqueName: \"kubernetes.io/projected/11a46772-3366-44ee-9479-0be0f0cfaca4-kube-api-access-mmbhx\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.117871 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.118194 4948 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-svc\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.121371 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.121745 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.128050 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-config\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.164477 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmbhx\" (UniqueName: \"kubernetes.io/projected/11a46772-3366-44ee-9479-0be0f0cfaca4-kube-api-access-mmbhx\") pod \"dnsmasq-dns-757b4f8459-bqnkw\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.413998 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-rxl64"] Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.418435 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:10 crc kubenswrapper[4948]: W0120 20:09:10.439018 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f3d8a46_101e_416b_b8c7_84c53794528e.slice/crio-0ae346c48c2ffaddaec44b597b3455309976d3b326cef7b9d02d523777c8b3ec WatchSource:0}: Error finding container 0ae346c48c2ffaddaec44b597b3455309976d3b326cef7b9d02d523777c8b3ec: Status 404 returned error can't find the container with id 0ae346c48c2ffaddaec44b597b3455309976d3b326cef7b9d02d523777c8b3ec Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.827034 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.865373 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e25e50e7-eae8-4ca6-98d5-c88278e5827e","Type":"ContainerStarted","Data":"40737150f86db37ef3cf379046f9f98f800e4d5a60730a8f221efd99d9b8c41b"} Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.866991 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rxl64" event={"ID":"6f3d8a46-101e-416b-b8c7-84c53794528e","Type":"ContainerStarted","Data":"0ae346c48c2ffaddaec44b597b3455309976d3b326cef7b9d02d523777c8b3ec"} Jan 20 20:09:10 crc kubenswrapper[4948]: I0120 20:09:10.901375 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-rxl64" podStartSLOduration=2.901352172 podStartE2EDuration="2.901352172s" podCreationTimestamp="2026-01-20 20:09:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:10.89102909 +0000 UTC m=+1178.841754059" watchObservedRunningTime="2026-01-20 20:09:10.901352172 +0000 UTC m=+1178.852077131" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.053836 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.147838 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.156136 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.234775 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-bqnkw"] Jan 20 20:09:11 crc kubenswrapper[4948]: W0120 20:09:11.249261 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11a46772_3366_44ee_9479_0be0f0cfaca4.slice/crio-324310ec1665f2df4760454bb02b9c9ad421d8e50b6de8a7cf360d51d419814a WatchSource:0}: Error finding container 324310ec1665f2df4760454bb02b9c9ad421d8e50b6de8a7cf360d51d419814a: Status 404 returned error can't find the container with id 324310ec1665f2df4760454bb02b9c9ad421d8e50b6de8a7cf360d51d419814a Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.454418 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-5x5w6"] Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.456254 4948 util.go:30] "No sandbox for pod can be found. 
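Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-5x5w6"

Both figures in the pod_startup_latency_tracker.go:104 entries derive from the logged timestamps: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is the same span with the image-pull window (lastFinishedPulling minus firstStartedPulling) excluded, which is why the two are equal whenever the pull timestamps are the zero value "0001-01-01 00:00:00 +0000 UTC", as in the nova-cell0-cell-mapping-rxl64 entry just above. A short Go sketch reproducing the arithmetic from the ceilometer-0 entry near the top of this passage; it illustrates how the logged fields relate and is not kubelet source:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Timestamps copied from the ceilometer-0 log entry.
		parse := func(s string) time.Time {
			t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
			if err != nil {
				panic(err)
			}
			return t
		}
		created := parse("2026-01-20 20:08:55 +0000 UTC")
		firstPull := parse("2026-01-20 20:08:56.747598996 +0000 UTC")
		lastPull := parse("2026-01-20 20:09:00.303885279 +0000 UTC")
		running := parse("2026-01-20 20:09:00.783976035 +0000 UTC") // watchObservedRunningTime

		e2e := running.Sub(created)          // 5.783976035s = podStartE2EDuration
		slo := e2e - lastPull.Sub(firstPull) // pull window excluded
		fmt.Println(e2e, slo)
	}

5.783976035s minus the 3.556286283s pull window gives the reported podStartSLOduration of 2.227689752.
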
Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.460180 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.460392 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.470158 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5gld\" (UniqueName: \"kubernetes.io/projected/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-kube-api-access-p5gld\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.470235 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-config-data\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.470308 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-scripts\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.470459 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.496145 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-5x5w6"] Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.571789 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.571980 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5gld\" (UniqueName: \"kubernetes.io/projected/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-kube-api-access-p5gld\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.572040 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-config-data\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.572128 4948
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-scripts\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.578445 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.579392 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-scripts\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.582288 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-config-data\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.599858 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5gld\" (UniqueName: \"kubernetes.io/projected/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-kube-api-access-p5gld\") pod \"nova-cell1-conductor-db-sync-5x5w6\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.795984 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.887857 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45e577b4-23c3-4979-ba2e-bd07d8d672e8","Type":"ContainerStarted","Data":"0d30105789f2398469fbb8f4b07d4e5dd197f6a7c0acdaef40f0d59d2ce91f7d"} Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.891626 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e","Type":"ContainerStarted","Data":"78e6e4f7a8bd264e4f222e17dcedae56be8b0e83c007b5f164460ed6c6a85773"} Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.899161 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rxl64" event={"ID":"6f3d8a46-101e-416b-b8c7-84c53794528e","Type":"ContainerStarted","Data":"d8039a951a0ffd31640fcbfc7fc01adead996729f2091892336370630606b900"} Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.901519 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4ba2a684-7bb3-415e-8f36-afcad42f65af","Type":"ContainerStarted","Data":"8eef21181a202990a0a6074cace9249be98b561c5210ebf6adf8c171d7247330"} Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.904466 4948 generic.go:334] "Generic (PLEG): container finished" podID="11a46772-3366-44ee-9479-0be0f0cfaca4" containerID="74a737bf5d82290a8810d5232c961e118d1224fef675fea127422df5490e61bf" exitCode=0 Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.904493 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" event={"ID":"11a46772-3366-44ee-9479-0be0f0cfaca4","Type":"ContainerDied","Data":"74a737bf5d82290a8810d5232c961e118d1224fef675fea127422df5490e61bf"} Jan 20 20:09:11 crc kubenswrapper[4948]: I0120 20:09:11.904509 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" event={"ID":"11a46772-3366-44ee-9479-0be0f0cfaca4","Type":"ContainerStarted","Data":"324310ec1665f2df4760454bb02b9c9ad421d8e50b6de8a7cf360d51d419814a"} Jan 20 20:09:12 crc kubenswrapper[4948]: I0120 20:09:12.700059 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-5x5w6"] Jan 20 20:09:12 crc kubenswrapper[4948]: I0120 20:09:12.935576 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-5x5w6" event={"ID":"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f","Type":"ContainerStarted","Data":"8088c94f98b09095f78e1f446b01de5d414f989ea14cf269657ad7bf91fa468d"} Jan 20 20:09:12 crc kubenswrapper[4948]: I0120 20:09:12.942890 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" event={"ID":"11a46772-3366-44ee-9479-0be0f0cfaca4","Type":"ContainerStarted","Data":"3d2b3ec4bf9c08452de9b8063c585585547d4154a21b1e338665fd069b6d739f"} Jan 20 20:09:12 crc kubenswrapper[4948]: I0120 20:09:12.943584 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:12 crc kubenswrapper[4948]: I0120 20:09:12.972190 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" podStartSLOduration=3.972170829 podStartE2EDuration="3.972170829s" podCreationTimestamp="2026-01-20 20:09:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:12.963085312 +0000 UTC m=+1180.913810281" watchObservedRunningTime="2026-01-20 20:09:12.972170829 +0000 UTC m=+1180.922895798" Jan 20 20:09:13 crc kubenswrapper[4948]: I0120 20:09:13.593966 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:13 crc kubenswrapper[4948]: I0120 20:09:13.652892 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:13 crc kubenswrapper[4948]: I0120 20:09:13.959739 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-5x5w6" event={"ID":"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f","Type":"ContainerStarted","Data":"3f11b7d6bf5df6c7dddeebe09c92747c57004301c58997190821908a6fc80272"} Jan 20 20:09:13 crc kubenswrapper[4948]: I0120 20:09:13.989545 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-5x5w6" podStartSLOduration=2.989511958 podStartE2EDuration="2.989511958s" podCreationTimestamp="2026-01-20 20:09:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:13.977328353 +0000 UTC m=+1181.928053322" watchObservedRunningTime="2026-01-20 20:09:13.989511958 +0000 UTC m=+1181.940236927" Jan 20 20:09:14 crc kubenswrapper[4948]: I0120 20:09:14.422929 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dd67cb9b-9w4wk" podUID="4d2c0905-915e-4504-8454-ee3500220ab3" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:09:14 crc kubenswrapper[4948]: I0120 20:09:14.557961 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.011512 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e25e50e7-eae8-4ca6-98d5-c88278e5827e","Type":"ContainerStarted","Data":"81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d"} Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.012245 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e25e50e7-eae8-4ca6-98d5-c88278e5827e","Type":"ContainerStarted","Data":"9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e"} Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.016491 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4ba2a684-7bb3-415e-8f36-afcad42f65af","Type":"ContainerStarted","Data":"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5"} Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.016531 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4ba2a684-7bb3-415e-8f36-afcad42f65af","Type":"ContainerStarted","Data":"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78"} Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.016628 4948 kuberuntime_container.go:808] "Killing 
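container with a grace period" pod="openstack/nova-metadata-0" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerName="nova-metadata-log" containerID="cri-o://b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78" gracePeriod=30

The two "Probe failed" entries for the horizon pods above are startup-probe timeouts: prober.go:107 logs probeType="Startup", and the output shows an HTTPS GET against :8443/dashboard/auth/login/ that hit the HTTP client timeout before any headers arrived. A hedged Go sketch of a probe consistent with that output, assuming k8s.io/api/core/v1; the path, port, and scheme are read off the logged URL, while the timeout, period, and failure threshold are assumed values:

	package podspec

	import (
		corev1 "k8s.io/api/core/v1"
		"k8s.io/apimachinery/pkg/util/intstr"
	)

	// horizonStartupProbe sketches a startup probe matching the
	// failure output logged for the horizon pods.
	var horizonStartupProbe = &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   "/dashboard/auth/login/?next=/dashboard/",
				Port:   intstr.FromInt(8443),
				Scheme: corev1.URISchemeHTTPS,
			},
		},
		TimeoutSeconds:   1,  // assumed; "Client.Timeout exceeded" means this elapsed
		PeriodSeconds:    10, // assumed
		FailureThreshold: 30, // assumed
	}

A single failure like the ones logged here only counts against FailureThreshold; the kubelet restarts the container only once the threshold is exhausted.
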
Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.016924 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerName="nova-metadata-metadata" containerID="cri-o://43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5" gracePeriod=30 Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.019473 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45e577b4-23c3-4979-ba2e-bd07d8d672e8","Type":"ContainerStarted","Data":"f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b"} Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.023912 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e","Type":"ContainerStarted","Data":"3f51cdc2d66e51caed320dd76f165f2f9cfbea33059effd45c21a9af925515a0"} Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.024065 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://3f51cdc2d66e51caed320dd76f165f2f9cfbea33059effd45c21a9af925515a0" gracePeriod=30 Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.040962 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.968180608 podStartE2EDuration="8.040942335s" podCreationTimestamp="2026-01-20 20:09:09 +0000 UTC" firstStartedPulling="2026-01-20 20:09:10.807366354 +0000 UTC m=+1178.758091323" lastFinishedPulling="2026-01-20 20:09:15.880128081 +0000 UTC m=+1183.830853050" observedRunningTime="2026-01-20 20:09:17.035585484 +0000 UTC m=+1184.986310453" watchObservedRunningTime="2026-01-20 20:09:17.040942335 +0000 UTC m=+1184.991667304" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.052944 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.172465865 podStartE2EDuration="8.052927564s" podCreationTimestamp="2026-01-20 20:09:09 +0000 UTC" firstStartedPulling="2026-01-20 20:09:11.000586918 +0000 UTC m=+1178.951311887" lastFinishedPulling="2026-01-20 20:09:15.881048607 +0000 UTC m=+1183.831773586" observedRunningTime="2026-01-20 20:09:17.052450141 +0000 UTC m=+1185.003175100" watchObservedRunningTime="2026-01-20 20:09:17.052927564 +0000 UTC m=+1185.003652533" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.089325 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.226410751 podStartE2EDuration="8.089308273s" podCreationTimestamp="2026-01-20 20:09:09 +0000 UTC" firstStartedPulling="2026-01-20 20:09:11.000191317 +0000 UTC m=+1178.950916296" lastFinishedPulling="2026-01-20 20:09:15.863088849 +0000 UTC m=+1183.813813818" observedRunningTime="2026-01-20 20:09:17.080988698 +0000 UTC m=+1185.031713657" watchObservedRunningTime="2026-01-20 20:09:17.089308273 +0000 UTC m=+1185.040033242" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.112126 4948 pod_startup_latency_tracker.go:104] "Observed pod startup
duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.396329895 podStartE2EDuration="8.112103337s" podCreationTimestamp="2026-01-20 20:09:09 +0000 UTC" firstStartedPulling="2026-01-20 20:09:11.162446305 +0000 UTC m=+1179.113171274" lastFinishedPulling="2026-01-20 20:09:15.878219747 +0000 UTC m=+1183.828944716" observedRunningTime="2026-01-20 20:09:17.100292693 +0000 UTC m=+1185.051017662" watchObservedRunningTime="2026-01-20 20:09:17.112103337 +0000 UTC m=+1185.062828306" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.616509 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.799699 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-config-data\") pod \"4ba2a684-7bb3-415e-8f36-afcad42f65af\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.799834 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ba2a684-7bb3-415e-8f36-afcad42f65af-logs\") pod \"4ba2a684-7bb3-415e-8f36-afcad42f65af\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.799925 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8r6qt\" (UniqueName: \"kubernetes.io/projected/4ba2a684-7bb3-415e-8f36-afcad42f65af-kube-api-access-8r6qt\") pod \"4ba2a684-7bb3-415e-8f36-afcad42f65af\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.800015 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-combined-ca-bundle\") pod \"4ba2a684-7bb3-415e-8f36-afcad42f65af\" (UID: \"4ba2a684-7bb3-415e-8f36-afcad42f65af\") " Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.800356 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ba2a684-7bb3-415e-8f36-afcad42f65af-logs" (OuterVolumeSpecName: "logs") pod "4ba2a684-7bb3-415e-8f36-afcad42f65af" (UID: "4ba2a684-7bb3-415e-8f36-afcad42f65af"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.800801 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4ba2a684-7bb3-415e-8f36-afcad42f65af-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.808935 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ba2a684-7bb3-415e-8f36-afcad42f65af-kube-api-access-8r6qt" (OuterVolumeSpecName: "kube-api-access-8r6qt") pod "4ba2a684-7bb3-415e-8f36-afcad42f65af" (UID: "4ba2a684-7bb3-415e-8f36-afcad42f65af"). InnerVolumeSpecName "kube-api-access-8r6qt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.836402 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-config-data" (OuterVolumeSpecName: "config-data") pod "4ba2a684-7bb3-415e-8f36-afcad42f65af" (UID: "4ba2a684-7bb3-415e-8f36-afcad42f65af"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.838167 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ba2a684-7bb3-415e-8f36-afcad42f65af" (UID: "4ba2a684-7bb3-415e-8f36-afcad42f65af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.904254 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8r6qt\" (UniqueName: \"kubernetes.io/projected/4ba2a684-7bb3-415e-8f36-afcad42f65af-kube-api-access-8r6qt\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.904524 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:17 crc kubenswrapper[4948]: I0120 20:09:17.904535 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ba2a684-7bb3-415e-8f36-afcad42f65af-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.038135 4948 generic.go:334] "Generic (PLEG): container finished" podID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerID="43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5" exitCode=0 Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.038176 4948 generic.go:334] "Generic (PLEG): container finished" podID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerID="b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78" exitCode=143 Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.039561 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.044229 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4ba2a684-7bb3-415e-8f36-afcad42f65af","Type":"ContainerDied","Data":"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5"} Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.044295 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4ba2a684-7bb3-415e-8f36-afcad42f65af","Type":"ContainerDied","Data":"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78"} Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.044309 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4ba2a684-7bb3-415e-8f36-afcad42f65af","Type":"ContainerDied","Data":"8eef21181a202990a0a6074cace9249be98b561c5210ebf6adf8c171d7247330"} Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.044338 4948 scope.go:117] "RemoveContainer" containerID="43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.087765 4948 scope.go:117] "RemoveContainer" containerID="b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.087894 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.134376 4948 scope.go:117] "RemoveContainer" containerID="43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5" Jan 20 20:09:18 crc kubenswrapper[4948]: E0120 20:09:18.137757 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5\": container with ID starting with 43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5 not found: ID does not exist" containerID="43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.137843 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5"} err="failed to get container status \"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5\": rpc error: code = NotFound desc = could not find container \"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5\": container with ID starting with 43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5 not found: ID does not exist" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.137884 4948 scope.go:117] "RemoveContainer" containerID="b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78" Jan 20 20:09:18 crc kubenswrapper[4948]: E0120 20:09:18.138392 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78\": container with ID starting with b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78 not found: ID does not exist" containerID="b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.138429 4948 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78"} err="failed to get container status \"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78\": rpc error: code = NotFound desc = could not find container \"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78\": container with ID starting with b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78 not found: ID does not exist" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.138447 4948 scope.go:117] "RemoveContainer" containerID="43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.138930 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.140981 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5"} err="failed to get container status \"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5\": rpc error: code = NotFound desc = could not find container \"43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5\": container with ID starting with 43e5f83d12014155dedd9122536eaf50af8a7c899d95646a12ed70197582a5a5 not found: ID does not exist" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.141032 4948 scope.go:117] "RemoveContainer" containerID="b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.141635 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78"} err="failed to get container status \"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78\": rpc error: code = NotFound desc = could not find container \"b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78\": container with ID starting with b751070cca8462bcb004a4d323a621539ab9a685cbd3f266b49c85be066d4e78 not found: ID does not exist" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.183942 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:18 crc kubenswrapper[4948]: E0120 20:09:18.186608 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerName="nova-metadata-log" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.186634 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerName="nova-metadata-log" Jan 20 20:09:18 crc kubenswrapper[4948]: E0120 20:09:18.186679 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerName="nova-metadata-metadata" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.186686 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerName="nova-metadata-metadata" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.187110 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerName="nova-metadata-metadata" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.187126 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" containerName="nova-metadata-log" Jan 20 20:09:18 crc 
kubenswrapper[4948]: I0120 20:09:18.193244 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.198689 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.198918 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.224769 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.321119 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/807d1797-01bf-4c61-a5cc-c1bb31612707-logs\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.321188 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-config-data\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.321212 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.321665 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.321754 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9nrd\" (UniqueName: \"kubernetes.io/projected/807d1797-01bf-4c61-a5cc-c1bb31612707-kube-api-access-m9nrd\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.423343 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.423397 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9nrd\" (UniqueName: \"kubernetes.io/projected/807d1797-01bf-4c61-a5cc-c1bb31612707-kube-api-access-m9nrd\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.423572 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/807d1797-01bf-4c61-a5cc-c1bb31612707-logs\") pod 
\"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.423636 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-config-data\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.423661 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.424844 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/807d1797-01bf-4c61-a5cc-c1bb31612707-logs\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.429495 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-config-data\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.432331 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.442753 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9nrd\" (UniqueName: \"kubernetes.io/projected/807d1797-01bf-4c61-a5cc-c1bb31612707-kube-api-access-m9nrd\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.445092 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.538334 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:18 crc kubenswrapper[4948]: I0120 20:09:18.589362 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ba2a684-7bb3-415e-8f36-afcad42f65af" path="/var/lib/kubelet/pods/4ba2a684-7bb3-415e-8f36-afcad42f65af/volumes" Jan 20 20:09:19 crc kubenswrapper[4948]: I0120 20:09:19.059609 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:19 crc kubenswrapper[4948]: I0120 20:09:19.741140 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 20:09:19 crc kubenswrapper[4948]: I0120 20:09:19.741738 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 20:09:19 crc kubenswrapper[4948]: I0120 20:09:19.762993 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:19 crc kubenswrapper[4948]: I0120 20:09:19.966904 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 20 20:09:19 crc kubenswrapper[4948]: I0120 20:09:19.968241 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.023881 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.065925 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"807d1797-01bf-4c61-a5cc-c1bb31612707","Type":"ContainerStarted","Data":"3f5acb9754ced13fd3d28b9ca2f1d46a11079b808cb3c4ebb301d5b4db7adfb5"} Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.065976 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"807d1797-01bf-4c61-a5cc-c1bb31612707","Type":"ContainerStarted","Data":"c6772c28467f75032265f5bac45e4e78723be25e22a1c3fa647c7207d8e08a1a"} Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.065991 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"807d1797-01bf-4c61-a5cc-c1bb31612707","Type":"ContainerStarted","Data":"7da2163b7a9d9fbd77704edb9cfdd594669cbac5f81d07aabab0cd260cdebba4"} Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.188817 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.210929 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.210908594 podStartE2EDuration="2.210908594s" podCreationTimestamp="2026-01-20 20:09:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:20.13401637 +0000 UTC m=+1188.084741339" watchObservedRunningTime="2026-01-20 20:09:20.210908594 +0000 UTC m=+1188.161633563" Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.421776 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.515772 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-pr8mc"] Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.516241 4948 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" podUID="bd4c5973-d20d-4277-b4df-2438dfc641d8" containerName="dnsmasq-dns" containerID="cri-o://ecf9a5fe437d4ecf14d06208938a593d4105c0583511fd482e857bc588faac44" gracePeriod=10 Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.822890 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.188:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:09:20 crc kubenswrapper[4948]: I0120 20:09:20.822889 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.188:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.093005 4948 generic.go:334] "Generic (PLEG): container finished" podID="bd4c5973-d20d-4277-b4df-2438dfc641d8" containerID="ecf9a5fe437d4ecf14d06208938a593d4105c0583511fd482e857bc588faac44" exitCode=0 Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.094353 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" event={"ID":"bd4c5973-d20d-4277-b4df-2438dfc641d8","Type":"ContainerDied","Data":"ecf9a5fe437d4ecf14d06208938a593d4105c0583511fd482e857bc588faac44"} Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.245753 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.404868 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-config\") pod \"bd4c5973-d20d-4277-b4df-2438dfc641d8\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.405199 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-swift-storage-0\") pod \"bd4c5973-d20d-4277-b4df-2438dfc641d8\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.405468 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzx4j\" (UniqueName: \"kubernetes.io/projected/bd4c5973-d20d-4277-b4df-2438dfc641d8-kube-api-access-rzx4j\") pod \"bd4c5973-d20d-4277-b4df-2438dfc641d8\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.405599 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-svc\") pod \"bd4c5973-d20d-4277-b4df-2438dfc641d8\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.405777 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-sb\") pod \"bd4c5973-d20d-4277-b4df-2438dfc641d8\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 
20:09:21.405973 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-nb\") pod \"bd4c5973-d20d-4277-b4df-2438dfc641d8\" (UID: \"bd4c5973-d20d-4277-b4df-2438dfc641d8\") " Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.468933 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd4c5973-d20d-4277-b4df-2438dfc641d8-kube-api-access-rzx4j" (OuterVolumeSpecName: "kube-api-access-rzx4j") pod "bd4c5973-d20d-4277-b4df-2438dfc641d8" (UID: "bd4c5973-d20d-4277-b4df-2438dfc641d8"). InnerVolumeSpecName "kube-api-access-rzx4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.508531 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzx4j\" (UniqueName: \"kubernetes.io/projected/bd4c5973-d20d-4277-b4df-2438dfc641d8-kube-api-access-rzx4j\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.568392 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bd4c5973-d20d-4277-b4df-2438dfc641d8" (UID: "bd4c5973-d20d-4277-b4df-2438dfc641d8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.572635 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bd4c5973-d20d-4277-b4df-2438dfc641d8" (UID: "bd4c5973-d20d-4277-b4df-2438dfc641d8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.579687 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bd4c5973-d20d-4277-b4df-2438dfc641d8" (UID: "bd4c5973-d20d-4277-b4df-2438dfc641d8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.593130 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bd4c5973-d20d-4277-b4df-2438dfc641d8" (UID: "bd4c5973-d20d-4277-b4df-2438dfc641d8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.603167 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-config" (OuterVolumeSpecName: "config") pod "bd4c5973-d20d-4277-b4df-2438dfc641d8" (UID: "bd4c5973-d20d-4277-b4df-2438dfc641d8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.622612 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.622658 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.622674 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.622684 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:21 crc kubenswrapper[4948]: I0120 20:09:21.622696 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd4c5973-d20d-4277-b4df-2438dfc641d8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:22 crc kubenswrapper[4948]: I0120 20:09:22.106923 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" event={"ID":"bd4c5973-d20d-4277-b4df-2438dfc641d8","Type":"ContainerDied","Data":"d9ae499fc2569925d4383a1af600720a02165aed2618c77c12ec33dbb9c0e9a7"} Jan 20 20:09:22 crc kubenswrapper[4948]: I0120 20:09:22.106984 4948 scope.go:117] "RemoveContainer" containerID="ecf9a5fe437d4ecf14d06208938a593d4105c0583511fd482e857bc588faac44" Jan 20 20:09:22 crc kubenswrapper[4948]: I0120 20:09:22.107019 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-pr8mc" Jan 20 20:09:22 crc kubenswrapper[4948]: I0120 20:09:22.156057 4948 scope.go:117] "RemoveContainer" containerID="2350ed0189e540bfad2705253dc5a355eb4fa3176ce9891e477ee8d3198026ed" Jan 20 20:09:22 crc kubenswrapper[4948]: I0120 20:09:22.158980 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-pr8mc"] Jan 20 20:09:22 crc kubenswrapper[4948]: I0120 20:09:22.172180 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-pr8mc"] Jan 20 20:09:22 crc kubenswrapper[4948]: I0120 20:09:22.599591 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd4c5973-d20d-4277-b4df-2438dfc641d8" path="/var/lib/kubelet/pods/bd4c5973-d20d-4277-b4df-2438dfc641d8/volumes" Jan 20 20:09:22 crc kubenswrapper[4948]: I0120 20:09:22.955994 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:09:23 crc kubenswrapper[4948]: I0120 20:09:23.269720 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:09:23 crc kubenswrapper[4948]: I0120 20:09:23.539053 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 20 20:09:23 crc kubenswrapper[4948]: I0120 20:09:23.540282 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.034222 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-67dd67cb9b-9w4wk" Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.126093 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68bc7c4fc6-4mkmv"] Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.126333 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon-log" containerID="cri-o://6adfd927e96ecfa6c7b6a841fa85196a4b50ebb518e1b96beb40195708ccb40c" gracePeriod=30 Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.126456 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" containerID="cri-o://eb250b4b5dbae1e0a758f7d341fc5c9464138bb0ec515d14abc4b1571a5d19f5" gracePeriod=30 Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.138238 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": EOF" Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.155231 4948 generic.go:334] "Generic (PLEG): container finished" podID="6f3d8a46-101e-416b-b8c7-84c53794528e" containerID="d8039a951a0ffd31640fcbfc7fc01adead996729f2091892336370630606b900" exitCode=0 Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.155305 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rxl64" event={"ID":"6f3d8a46-101e-416b-b8c7-84c53794528e","Type":"ContainerDied","Data":"d8039a951a0ffd31640fcbfc7fc01adead996729f2091892336370630606b900"} Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.157618 4948 generic.go:334] 
"Generic (PLEG): container finished" podID="aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" containerID="3f11b7d6bf5df6c7dddeebe09c92747c57004301c58997190821908a6fc80272" exitCode=0 Jan 20 20:09:25 crc kubenswrapper[4948]: I0120 20:09:25.157664 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-5x5w6" event={"ID":"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f","Type":"ContainerDied","Data":"3f11b7d6bf5df6c7dddeebe09c92747c57004301c58997190821908a6fc80272"} Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.221874 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.691106 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.816229 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.846534 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-config-data\") pod \"6f3d8a46-101e-416b-b8c7-84c53794528e\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.846769 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qk25k\" (UniqueName: \"kubernetes.io/projected/6f3d8a46-101e-416b-b8c7-84c53794528e-kube-api-access-qk25k\") pod \"6f3d8a46-101e-416b-b8c7-84c53794528e\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.846792 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-scripts\") pod \"6f3d8a46-101e-416b-b8c7-84c53794528e\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.846811 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-combined-ca-bundle\") pod \"6f3d8a46-101e-416b-b8c7-84c53794528e\" (UID: \"6f3d8a46-101e-416b-b8c7-84c53794528e\") " Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.857008 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f3d8a46-101e-416b-b8c7-84c53794528e-kube-api-access-qk25k" (OuterVolumeSpecName: "kube-api-access-qk25k") pod "6f3d8a46-101e-416b-b8c7-84c53794528e" (UID: "6f3d8a46-101e-416b-b8c7-84c53794528e"). InnerVolumeSpecName "kube-api-access-qk25k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.857330 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-scripts" (OuterVolumeSpecName: "scripts") pod "6f3d8a46-101e-416b-b8c7-84c53794528e" (UID: "6f3d8a46-101e-416b-b8c7-84c53794528e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.891637 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-config-data" (OuterVolumeSpecName: "config-data") pod "6f3d8a46-101e-416b-b8c7-84c53794528e" (UID: "6f3d8a46-101e-416b-b8c7-84c53794528e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.918245 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f3d8a46-101e-416b-b8c7-84c53794528e" (UID: "6f3d8a46-101e-416b-b8c7-84c53794528e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.948745 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-combined-ca-bundle\") pod \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.948822 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5gld\" (UniqueName: \"kubernetes.io/projected/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-kube-api-access-p5gld\") pod \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.949005 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-scripts\") pod \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.949093 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-config-data\") pod \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\" (UID: \"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f\") " Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.949760 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.949787 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qk25k\" (UniqueName: \"kubernetes.io/projected/6f3d8a46-101e-416b-b8c7-84c53794528e-kube-api-access-qk25k\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.949802 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.949817 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3d8a46-101e-416b-b8c7-84c53794528e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.955075 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-kube-api-access-p5gld" (OuterVolumeSpecName: "kube-api-access-p5gld") pod "aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" (UID: "aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f"). InnerVolumeSpecName "kube-api-access-p5gld". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.957003 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-scripts" (OuterVolumeSpecName: "scripts") pod "aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" (UID: "aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:26 crc kubenswrapper[4948]: I0120 20:09:26.991358 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-config-data" (OuterVolumeSpecName: "config-data") pod "aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" (UID: "aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.001892 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" (UID: "aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.051655 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.051735 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5gld\" (UniqueName: \"kubernetes.io/projected/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-kube-api-access-p5gld\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.051752 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.051763 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.175033 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-5x5w6" event={"ID":"aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f","Type":"ContainerDied","Data":"8088c94f98b09095f78e1f446b01de5d414f989ea14cf269657ad7bf91fa468d"} Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.175550 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8088c94f98b09095f78e1f446b01de5d414f989ea14cf269657ad7bf91fa468d" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.175052 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-5x5w6" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.176698 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-rxl64" event={"ID":"6f3d8a46-101e-416b-b8c7-84c53794528e","Type":"ContainerDied","Data":"0ae346c48c2ffaddaec44b597b3455309976d3b326cef7b9d02d523777c8b3ec"} Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.176758 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-rxl64" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.176769 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ae346c48c2ffaddaec44b597b3455309976d3b326cef7b9d02d523777c8b3ec" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.296105 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 20 20:09:27 crc kubenswrapper[4948]: E0120 20:09:27.296607 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4c5973-d20d-4277-b4df-2438dfc641d8" containerName="init" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.296625 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4c5973-d20d-4277-b4df-2438dfc641d8" containerName="init" Jan 20 20:09:27 crc kubenswrapper[4948]: E0120 20:09:27.296655 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3d8a46-101e-416b-b8c7-84c53794528e" containerName="nova-manage" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.296664 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3d8a46-101e-416b-b8c7-84c53794528e" containerName="nova-manage" Jan 20 20:09:27 crc kubenswrapper[4948]: E0120 20:09:27.296683 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4c5973-d20d-4277-b4df-2438dfc641d8" containerName="dnsmasq-dns" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.296691 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4c5973-d20d-4277-b4df-2438dfc641d8" containerName="dnsmasq-dns" Jan 20 20:09:27 crc kubenswrapper[4948]: E0120 20:09:27.296725 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" containerName="nova-cell1-conductor-db-sync" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.296734 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" containerName="nova-cell1-conductor-db-sync" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.296958 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd4c5973-d20d-4277-b4df-2438dfc641d8" containerName="dnsmasq-dns" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.296981 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f3d8a46-101e-416b-b8c7-84c53794528e" containerName="nova-manage" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.297003 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" containerName="nova-cell1-conductor-db-sync" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.297773 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.305551 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.321276 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.367329 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.367387 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.367669 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxbfv\" (UniqueName: \"kubernetes.io/projected/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-kube-api-access-dxbfv\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.469146 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxbfv\" (UniqueName: \"kubernetes.io/projected/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-kube-api-access-dxbfv\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.469266 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.469286 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.474439 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.482804 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.502377 4948 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.502682 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-log" containerID="cri-o://9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e" gracePeriod=30 Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.502934 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-api" containerID="cri-o://81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d" gracePeriod=30 Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.503829 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxbfv\" (UniqueName: \"kubernetes.io/projected/d3f5f7e6-247c-41c7-877c-f43cf1b1f412-kube-api-access-dxbfv\") pod \"nova-cell1-conductor-0\" (UID: \"d3f5f7e6-247c-41c7-877c-f43cf1b1f412\") " pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.523325 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.523557 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="45e577b4-23c3-4979-ba2e-bd07d8d672e8" containerName="nova-scheduler-scheduler" containerID="cri-o://f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b" gracePeriod=30 Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.556836 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.557140 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerName="nova-metadata-log" containerID="cri-o://c6772c28467f75032265f5bac45e4e78723be25e22a1c3fa647c7207d8e08a1a" gracePeriod=30 Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.557623 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerName="nova-metadata-metadata" containerID="cri-o://3f5acb9754ced13fd3d28b9ca2f1d46a11079b808cb3c4ebb301d5b4db7adfb5" gracePeriod=30 Jan 20 20:09:27 crc kubenswrapper[4948]: I0120 20:09:27.615244 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.122242 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.205355 4948 generic.go:334] "Generic (PLEG): container finished" podID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerID="3f5acb9754ced13fd3d28b9ca2f1d46a11079b808cb3c4ebb301d5b4db7adfb5" exitCode=0 Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.205404 4948 generic.go:334] "Generic (PLEG): container finished" podID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerID="c6772c28467f75032265f5bac45e4e78723be25e22a1c3fa647c7207d8e08a1a" exitCode=143 Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.205461 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"807d1797-01bf-4c61-a5cc-c1bb31612707","Type":"ContainerDied","Data":"3f5acb9754ced13fd3d28b9ca2f1d46a11079b808cb3c4ebb301d5b4db7adfb5"} Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.205491 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"807d1797-01bf-4c61-a5cc-c1bb31612707","Type":"ContainerDied","Data":"c6772c28467f75032265f5bac45e4e78723be25e22a1c3fa647c7207d8e08a1a"} Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.207302 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"d3f5f7e6-247c-41c7-877c-f43cf1b1f412","Type":"ContainerStarted","Data":"5f73189f230358aa17a1f8507772ca16c9ecadab0d2870814cb261fb7b8098a2"} Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.211206 4948 generic.go:334] "Generic (PLEG): container finished" podID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerID="9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e" exitCode=143 Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.211274 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e25e50e7-eae8-4ca6-98d5-c88278e5827e","Type":"ContainerDied","Data":"9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e"} Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.246605 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.291052 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-combined-ca-bundle\") pod \"807d1797-01bf-4c61-a5cc-c1bb31612707\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.291122 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/807d1797-01bf-4c61-a5cc-c1bb31612707-logs\") pod \"807d1797-01bf-4c61-a5cc-c1bb31612707\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.291190 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9nrd\" (UniqueName: \"kubernetes.io/projected/807d1797-01bf-4c61-a5cc-c1bb31612707-kube-api-access-m9nrd\") pod \"807d1797-01bf-4c61-a5cc-c1bb31612707\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.291227 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-config-data\") pod \"807d1797-01bf-4c61-a5cc-c1bb31612707\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.291300 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-nova-metadata-tls-certs\") pod \"807d1797-01bf-4c61-a5cc-c1bb31612707\" (UID: \"807d1797-01bf-4c61-a5cc-c1bb31612707\") " Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.295950 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/807d1797-01bf-4c61-a5cc-c1bb31612707-logs" (OuterVolumeSpecName: "logs") pod "807d1797-01bf-4c61-a5cc-c1bb31612707" (UID: "807d1797-01bf-4c61-a5cc-c1bb31612707"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.322864 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/807d1797-01bf-4c61-a5cc-c1bb31612707-kube-api-access-m9nrd" (OuterVolumeSpecName: "kube-api-access-m9nrd") pod "807d1797-01bf-4c61-a5cc-c1bb31612707" (UID: "807d1797-01bf-4c61-a5cc-c1bb31612707"). InnerVolumeSpecName "kube-api-access-m9nrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.355137 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "807d1797-01bf-4c61-a5cc-c1bb31612707" (UID: "807d1797-01bf-4c61-a5cc-c1bb31612707"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.359397 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-config-data" (OuterVolumeSpecName: "config-data") pod "807d1797-01bf-4c61-a5cc-c1bb31612707" (UID: "807d1797-01bf-4c61-a5cc-c1bb31612707"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.384772 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "807d1797-01bf-4c61-a5cc-c1bb31612707" (UID: "807d1797-01bf-4c61-a5cc-c1bb31612707"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.394367 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9nrd\" (UniqueName: \"kubernetes.io/projected/807d1797-01bf-4c61-a5cc-c1bb31612707-kube-api-access-m9nrd\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.394413 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.394425 4948 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.394435 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/807d1797-01bf-4c61-a5cc-c1bb31612707-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:28 crc kubenswrapper[4948]: I0120 20:09:28.394445 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/807d1797-01bf-4c61-a5cc-c1bb31612707-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.222551 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"807d1797-01bf-4c61-a5cc-c1bb31612707","Type":"ContainerDied","Data":"7da2163b7a9d9fbd77704edb9cfdd594669cbac5f81d07aabab0cd260cdebba4"} Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.222829 4948 scope.go:117] "RemoveContainer" containerID="3f5acb9754ced13fd3d28b9ca2f1d46a11079b808cb3c4ebb301d5b4db7adfb5" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.222871 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.230408 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"d3f5f7e6-247c-41c7-877c-f43cf1b1f412","Type":"ContainerStarted","Data":"c62a8409e9bd863d78958e7193e8249b8510a9fa020ad880ba725b1cec4080f7"} Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.230535 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.248524 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.256699 4948 scope.go:117] "RemoveContainer" containerID="c6772c28467f75032265f5bac45e4e78723be25e22a1c3fa647c7207d8e08a1a" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.276613 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.277790 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.277759316 podStartE2EDuration="2.277759316s" podCreationTimestamp="2026-01-20 20:09:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:29.263114002 +0000 UTC m=+1197.213838981" watchObservedRunningTime="2026-01-20 20:09:29.277759316 +0000 UTC m=+1197.228484285" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.386429 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:29 crc kubenswrapper[4948]: E0120 20:09:29.386854 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerName="nova-metadata-metadata" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.386869 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerName="nova-metadata-metadata" Jan 20 20:09:29 crc kubenswrapper[4948]: E0120 20:09:29.386896 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerName="nova-metadata-log" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.386903 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerName="nova-metadata-log" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.387124 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerName="nova-metadata-log" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.387147 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" containerName="nova-metadata-metadata" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.388177 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.398572 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.401170 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.401468 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.444661 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqqc8\" (UniqueName: \"kubernetes.io/projected/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-kube-api-access-bqqc8\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.444849 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.444946 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.445012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-config-data\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.445030 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-logs\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.547827 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.549198 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.549368 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-config-data\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " 
pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.549814 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-logs\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.550134 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqqc8\" (UniqueName: \"kubernetes.io/projected/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-kube-api-access-bqqc8\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.551004 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-logs\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.554660 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-config-data\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.555095 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.558587 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.569410 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqqc8\" (UniqueName: \"kubernetes.io/projected/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-kube-api-access-bqqc8\") pod \"nova-metadata-0\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.582421 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:48778->10.217.0.145:8443: read: connection reset by peer" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.583278 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Jan 20 20:09:29 crc kubenswrapper[4948]: I0120 20:09:29.734876 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:09:29 crc kubenswrapper[4948]: E0120 20:09:29.970872 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b is running failed: container process not found" containerID="f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 20 20:09:29 crc kubenswrapper[4948]: E0120 20:09:29.972020 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b is running failed: container process not found" containerID="f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 20 20:09:29 crc kubenswrapper[4948]: E0120 20:09:29.974343 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b is running failed: container process not found" containerID="f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 20 20:09:29 crc kubenswrapper[4948]: E0120 20:09:29.974385 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="45e577b4-23c3-4979-ba2e-bd07d8d672e8" containerName="nova-scheduler-scheduler" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.086043 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.162468 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-combined-ca-bundle\") pod \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.162544 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-config-data\") pod \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.162658 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwjhr\" (UniqueName: \"kubernetes.io/projected/45e577b4-23c3-4979-ba2e-bd07d8d672e8-kube-api-access-nwjhr\") pod \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\" (UID: \"45e577b4-23c3-4979-ba2e-bd07d8d672e8\") " Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.174004 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45e577b4-23c3-4979-ba2e-bd07d8d672e8-kube-api-access-nwjhr" (OuterVolumeSpecName: "kube-api-access-nwjhr") pod "45e577b4-23c3-4979-ba2e-bd07d8d672e8" (UID: "45e577b4-23c3-4979-ba2e-bd07d8d672e8"). InnerVolumeSpecName "kube-api-access-nwjhr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.198995 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-config-data" (OuterVolumeSpecName: "config-data") pod "45e577b4-23c3-4979-ba2e-bd07d8d672e8" (UID: "45e577b4-23c3-4979-ba2e-bd07d8d672e8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.199117 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45e577b4-23c3-4979-ba2e-bd07d8d672e8" (UID: "45e577b4-23c3-4979-ba2e-bd07d8d672e8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.310634 4948 generic.go:334] "Generic (PLEG): container finished" podID="af522f17-3cad-4004-b112-51e47fa9fea7" containerID="eb250b4b5dbae1e0a758f7d341fc5c9464138bb0ec515d14abc4b1571a5d19f5" exitCode=0 Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.310790 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerDied","Data":"eb250b4b5dbae1e0a758f7d341fc5c9464138bb0ec515d14abc4b1571a5d19f5"} Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.310864 4948 scope.go:117] "RemoveContainer" containerID="f5337fdeea822defb3bda066c6a194da1d66af7fc4c86187fb510469631f72ad" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.317603 4948 generic.go:334] "Generic (PLEG): container finished" podID="45e577b4-23c3-4979-ba2e-bd07d8d672e8" containerID="f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b" exitCode=0 Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.317814 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.323725 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45e577b4-23c3-4979-ba2e-bd07d8d672e8","Type":"ContainerDied","Data":"f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b"} Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.323797 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"45e577b4-23c3-4979-ba2e-bd07d8d672e8","Type":"ContainerDied","Data":"0d30105789f2398469fbb8f4b07d4e5dd197f6a7c0acdaef40f0d59d2ce91f7d"} Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.324038 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.324069 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45e577b4-23c3-4979-ba2e-bd07d8d672e8-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.324080 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwjhr\" (UniqueName: \"kubernetes.io/projected/45e577b4-23c3-4979-ba2e-bd07d8d672e8-kube-api-access-nwjhr\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.386772 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.394377 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.410091 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.420531 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:09:30 crc kubenswrapper[4948]: E0120 20:09:30.421000 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45e577b4-23c3-4979-ba2e-bd07d8d672e8" containerName="nova-scheduler-scheduler" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.421020 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="45e577b4-23c3-4979-ba2e-bd07d8d672e8" containerName="nova-scheduler-scheduler" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.421249 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="45e577b4-23c3-4979-ba2e-bd07d8d672e8" containerName="nova-scheduler-scheduler" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.426673 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.431293 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.436832 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.527668 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-config-data\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.527940 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flspp\" (UniqueName: \"kubernetes.io/projected/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-kube-api-access-flspp\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.528206 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.536532 4948 scope.go:117] "RemoveContainer" containerID="f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b" Jan 20 20:09:30 crc kubenswrapper[4948]: W0120 20:09:30.538424 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod824bf5c9_bec4_4a65_a69f_6c3d0b7a1b26.slice/crio-30d103d9618d84221f6b19798057b16165b7ace2193ce22cb2c466c273d5eed7 WatchSource:0}: Error finding container 30d103d9618d84221f6b19798057b16165b7ace2193ce22cb2c466c273d5eed7: Status 404 returned error can't find the container with id 30d103d9618d84221f6b19798057b16165b7ace2193ce22cb2c466c273d5eed7 Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.574901 4948 scope.go:117] "RemoveContainer" containerID="f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b" Jan 20 20:09:30 crc kubenswrapper[4948]: E0120 20:09:30.575813 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b\": container with ID starting with f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b not found: ID does not exist" containerID="f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.575849 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b"} err="failed to get container status \"f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b\": rpc error: code = NotFound desc = could not find container \"f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b\": container with ID starting with f69eede1a40184d4684f77f16fa8d708477b173c422dc007831d08496c02090b not found: ID does not exist" Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 
Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.586346 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="807d1797-01bf-4c61-a5cc-c1bb31612707" path="/var/lib/kubelet/pods/807d1797-01bf-4c61-a5cc-c1bb31612707/volumes"
Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.630501 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flspp\" (UniqueName: \"kubernetes.io/projected/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-kube-api-access-flspp\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0"
Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.630883 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0"
Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.631002 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-config-data\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0"
Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.636355 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-config-data\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0"
Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.649737 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0"
Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.654210 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flspp\" (UniqueName: \"kubernetes.io/projected/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-kube-api-access-flspp\") pod \"nova-scheduler-0\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " pod="openstack/nova-scheduler-0"
Jan 20 20:09:30 crc kubenswrapper[4948]: I0120 20:09:30.749296 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.130547 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: W0120 20:09:31.204622 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c6fe1bc_8f9f_4504_97cc_1ac4905634a8.slice/crio-8af4ed67ea7b4e2e8156924b70d91a9309b84ffa86a6a8b6ef9426dd66a86b3a WatchSource:0}: Error finding container 8af4ed67ea7b4e2e8156924b70d91a9309b84ffa86a6a8b6ef9426dd66a86b3a: Status 404 returned error can't find the container with id 8af4ed67ea7b4e2e8156924b70d91a9309b84ffa86a6a8b6ef9426dd66a86b3a
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.205109 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.246808 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h29ns\" (UniqueName: \"kubernetes.io/projected/e25e50e7-eae8-4ca6-98d5-c88278e5827e-kube-api-access-h29ns\") pod \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") "
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.246899 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-config-data\") pod \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") "
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.246963 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e25e50e7-eae8-4ca6-98d5-c88278e5827e-logs\") pod \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") "
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.247016 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-combined-ca-bundle\") pod \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\" (UID: \"e25e50e7-eae8-4ca6-98d5-c88278e5827e\") "
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.253309 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e25e50e7-eae8-4ca6-98d5-c88278e5827e-logs" (OuterVolumeSpecName: "logs") pod "e25e50e7-eae8-4ca6-98d5-c88278e5827e" (UID: "e25e50e7-eae8-4ca6-98d5-c88278e5827e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.309161 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e25e50e7-eae8-4ca6-98d5-c88278e5827e-kube-api-access-h29ns" (OuterVolumeSpecName: "kube-api-access-h29ns") pod "e25e50e7-eae8-4ca6-98d5-c88278e5827e" (UID: "e25e50e7-eae8-4ca6-98d5-c88278e5827e"). InnerVolumeSpecName "kube-api-access-h29ns". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.340490 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e25e50e7-eae8-4ca6-98d5-c88278e5827e" (UID: "e25e50e7-eae8-4ca6-98d5-c88278e5827e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.342497 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-config-data" (OuterVolumeSpecName: "config-data") pod "e25e50e7-eae8-4ca6-98d5-c88278e5827e" (UID: "e25e50e7-eae8-4ca6-98d5-c88278e5827e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.346288 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26","Type":"ContainerStarted","Data":"ced74b77f9231f99559bcbf5acf84d152938805fd81a9a90bebb671870edbabb"}
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.347090 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26","Type":"ContainerStarted","Data":"30d103d9618d84221f6b19798057b16165b7ace2193ce22cb2c466c273d5eed7"}
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.349183 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h29ns\" (UniqueName: \"kubernetes.io/projected/e25e50e7-eae8-4ca6-98d5-c88278e5827e-kube-api-access-h29ns\") on node \"crc\" DevicePath \"\""
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.349215 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.349228 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e25e50e7-eae8-4ca6-98d5-c88278e5827e-logs\") on node \"crc\" DevicePath \"\""
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.349239 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e25e50e7-eae8-4ca6-98d5-c88278e5827e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.351939 4948 generic.go:334] "Generic (PLEG): container finished" podID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerID="81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d" exitCode=0
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.352106 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e25e50e7-eae8-4ca6-98d5-c88278e5827e","Type":"ContainerDied","Data":"81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d"}
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.352138 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e25e50e7-eae8-4ca6-98d5-c88278e5827e","Type":"ContainerDied","Data":"40737150f86db37ef3cf379046f9f98f800e4d5a60730a8f221efd99d9b8c41b"}
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.352181 4948 scope.go:117] "RemoveContainer" containerID="81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.352364 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.368305 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8","Type":"ContainerStarted","Data":"8af4ed67ea7b4e2e8156924b70d91a9309b84ffa86a6a8b6ef9426dd66a86b3a"}
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.444470 4948 scope.go:117] "RemoveContainer" containerID="9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.449127 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.460899 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.494840 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:09:31 crc kubenswrapper[4948]: E0120 20:09:31.495429 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-api"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.495454 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-api"
Jan 20 20:09:31 crc kubenswrapper[4948]: E0120 20:09:31.495478 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-log"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.495488 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-log"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.495780 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-api"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.495806 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" containerName="nova-api-log"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.497282 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.502087 4948 scope.go:117] "RemoveContainer" containerID="81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.502270 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 20 20:09:31 crc kubenswrapper[4948]: E0120 20:09:31.503195 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d\": container with ID starting with 81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d not found: ID does not exist" containerID="81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.503245 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d"} err="failed to get container status \"81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d\": rpc error: code = NotFound desc = could not find container \"81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d\": container with ID starting with 81e16847792a7d4194a55d11e94416c098be0ce307b01c37c330e9ecc1ecda0d not found: ID does not exist"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.503270 4948 scope.go:117] "RemoveContainer" containerID="9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e"
Jan 20 20:09:31 crc kubenswrapper[4948]: E0120 20:09:31.504899 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e\": container with ID starting with 9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e not found: ID does not exist" containerID="9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.504929 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e"} err="failed to get container status \"9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e\": rpc error: code = NotFound desc = could not find container \"9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e\": container with ID starting with 9de9a12f9da08481bd646f886840d693291749cefe85407ae41ff3072edd1f7e not found: ID does not exist"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.510894 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.558127 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hztvc\" (UniqueName: \"kubernetes.io/projected/0c55a62b-8726-4451-bb36-ff327f6f5700-kube-api-access-hztvc\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.558357 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-config-data\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.558502 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c55a62b-8726-4451-bb36-ff327f6f5700-logs\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.558692 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.662986 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c55a62b-8726-4451-bb36-ff327f6f5700-logs\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.663182 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.663215 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hztvc\" (UniqueName: \"kubernetes.io/projected/0c55a62b-8726-4451-bb36-ff327f6f5700-kube-api-access-hztvc\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.663241 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-config-data\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.666419 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c55a62b-8726-4451-bb36-ff327f6f5700-logs\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.676480 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-config-data\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.690724 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.708361 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hztvc\" (UniqueName: \"kubernetes.io/projected/0c55a62b-8726-4451-bb36-ff327f6f5700-kube-api-access-hztvc\") pod \"nova-api-0\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") " pod="openstack/nova-api-0"
Jan 20 20:09:31 crc kubenswrapper[4948]: I0120 20:09:31.824589 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 20:09:32 crc kubenswrapper[4948]: I0120 20:09:32.407975 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26","Type":"ContainerStarted","Data":"214bbc05e6b10db32eae871db871075877e141ad6abb1fac63a3a9dc5ab0402a"}
Jan 20 20:09:32 crc kubenswrapper[4948]: I0120 20:09:32.415383 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8","Type":"ContainerStarted","Data":"53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b"}
Jan 20 20:09:32 crc kubenswrapper[4948]: I0120 20:09:32.444476 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.444452353 podStartE2EDuration="3.444452353s" podCreationTimestamp="2026-01-20 20:09:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:32.431417985 +0000 UTC m=+1200.382142954" watchObservedRunningTime="2026-01-20 20:09:32.444452353 +0000 UTC m=+1200.395177322"
Jan 20 20:09:32 crc kubenswrapper[4948]: I0120 20:09:32.469073 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.469046449 podStartE2EDuration="2.469046449s" podCreationTimestamp="2026-01-20 20:09:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:32.458178411 +0000 UTC m=+1200.408903390" watchObservedRunningTime="2026-01-20 20:09:32.469046449 +0000 UTC m=+1200.419771418"
Jan 20 20:09:32 crc kubenswrapper[4948]: I0120 20:09:32.511905 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:09:32 crc kubenswrapper[4948]: W0120 20:09:32.518088 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c55a62b_8726_4451_bb36_ff327f6f5700.slice/crio-9d4ac6a00f05cd598824d713457092aae58305606b81ba38b43a4dd90f208232 WatchSource:0}: Error finding container 9d4ac6a00f05cd598824d713457092aae58305606b81ba38b43a4dd90f208232: Status 404 returned error can't find the container with id 9d4ac6a00f05cd598824d713457092aae58305606b81ba38b43a4dd90f208232
Jan 20 20:09:32 crc kubenswrapper[4948]: I0120 20:09:32.620251 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e25e50e7-eae8-4ca6-98d5-c88278e5827e" path="/var/lib/kubelet/pods/e25e50e7-eae8-4ca6-98d5-c88278e5827e/volumes"
Jan 20 20:09:33 crc kubenswrapper[4948]: I0120 20:09:33.428155 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0c55a62b-8726-4451-bb36-ff327f6f5700","Type":"ContainerStarted","Data":"caab64a9544c3bb514fc5a62e6790c478903737c9e26b4e30d27670462ff8f91"}
Jan 20 20:09:33 crc kubenswrapper[4948]: I0120 20:09:33.428550 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0c55a62b-8726-4451-bb36-ff327f6f5700","Type":"ContainerStarted","Data":"b1777c8467c06fbf2471ef17f23fcfdf748713bea3c1d5b3f2ba19fd9f77e069"}
Jan 20 20:09:33 crc kubenswrapper[4948]: I0120 20:09:33.428578 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0c55a62b-8726-4451-bb36-ff327f6f5700","Type":"ContainerStarted","Data":"9d4ac6a00f05cd598824d713457092aae58305606b81ba38b43a4dd90f208232"}
Jan 20 20:09:33 crc kubenswrapper[4948]: I0120 20:09:33.448619 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.448592208 podStartE2EDuration="2.448592208s" podCreationTimestamp="2026-01-20 20:09:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:33.443754291 +0000 UTC m=+1201.394479260" watchObservedRunningTime="2026-01-20 20:09:33.448592208 +0000 UTC m=+1201.399317177"
Jan 20 20:09:34 crc kubenswrapper[4948]: I0120 20:09:34.735413 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 20 20:09:34 crc kubenswrapper[4948]: I0120 20:09:34.735784 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 20 20:09:35 crc kubenswrapper[4948]: I0120 20:09:35.750008 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 20 20:09:37 crc kubenswrapper[4948]: I0120 20:09:37.651033 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Jan 20 20:09:39 crc kubenswrapper[4948]: I0120 20:09:39.540526 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused"
Jan 20 20:09:39 crc kubenswrapper[4948]: I0120 20:09:39.735183 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 20 20:09:39 crc kubenswrapper[4948]: I0120 20:09:39.735364 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 20 20:09:40 crc kubenswrapper[4948]: I0120 20:09:40.750187 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 20 20:09:40 crc kubenswrapper[4948]: I0120 20:09:40.750956 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 20 20:09:40 crc kubenswrapper[4948]: I0120 20:09:40.750975 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 20 20:09:40 crc kubenswrapper[4948]: I0120 20:09:40.777451 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 20 20:09:41 crc kubenswrapper[4948]: I0120 20:09:41.526398 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 20 20:09:41 crc kubenswrapper[4948]: I0120 20:09:41.825161 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
pod="openstack/nova-api-0" Jan 20 20:09:41 crc kubenswrapper[4948]: I0120 20:09:41.826089 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 20:09:42 crc kubenswrapper[4948]: I0120 20:09:42.916555 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:09:42 crc kubenswrapper[4948]: I0120 20:09:42.916587 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 20 20:09:47 crc kubenswrapper[4948]: I0120 20:09:47.574279 4948 generic.go:334] "Generic (PLEG): container finished" podID="12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" containerID="3f51cdc2d66e51caed320dd76f165f2f9cfbea33059effd45c21a9af925515a0" exitCode=137 Jan 20 20:09:47 crc kubenswrapper[4948]: I0120 20:09:47.574582 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e","Type":"ContainerDied","Data":"3f51cdc2d66e51caed320dd76f165f2f9cfbea33059effd45c21a9af925515a0"} Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.115002 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.313045 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qfrx\" (UniqueName: \"kubernetes.io/projected/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-kube-api-access-2qfrx\") pod \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.313423 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-config-data\") pod \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.313574 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-combined-ca-bundle\") pod \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\" (UID: \"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e\") " Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.318855 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-kube-api-access-2qfrx" (OuterVolumeSpecName: "kube-api-access-2qfrx") pod "12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" (UID: "12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e"). InnerVolumeSpecName "kube-api-access-2qfrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.340908 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" (UID: "12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.355996 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-config-data" (OuterVolumeSpecName: "config-data") pod "12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" (UID: "12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.417696 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.417794 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.417820 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qfrx\" (UniqueName: \"kubernetes.io/projected/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e-kube-api-access-2qfrx\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.585860 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e","Type":"ContainerDied","Data":"78e6e4f7a8bd264e4f222e17dcedae56be8b0e83c007b5f164460ed6c6a85773"} Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.586819 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.587221 4948 scope.go:117] "RemoveContainer" containerID="3f51cdc2d66e51caed320dd76f165f2f9cfbea33059effd45c21a9af925515a0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.658452 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.681631 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.696746 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:48 crc kubenswrapper[4948]: E0120 20:09:48.697166 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" containerName="nova-cell1-novncproxy-novncproxy" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.697180 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" containerName="nova-cell1-novncproxy-novncproxy" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.697384 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" containerName="nova-cell1-novncproxy-novncproxy" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.708218 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.715138 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.715535 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.716543 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.721154 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.826898 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.827411 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.827534 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.827660 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frtlm\" (UniqueName: \"kubernetes.io/projected/8dc0455c-7835-456a-b537-34836da2cdff-kube-api-access-frtlm\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.827942 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.929483 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.929533 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.929568 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frtlm\" (UniqueName: \"kubernetes.io/projected/8dc0455c-7835-456a-b537-34836da2cdff-kube-api-access-frtlm\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.929676 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.929729 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.934462 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.934945 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:48 crc kubenswrapper[4948]: I0120 20:09:48.999780 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.000419 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dc0455c-7835-456a-b537-34836da2cdff-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.011394 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frtlm\" (UniqueName: \"kubernetes.io/projected/8dc0455c-7835-456a-b537-34836da2cdff-kube-api-access-frtlm\") pod \"nova-cell1-novncproxy-0\" (UID: \"8dc0455c-7835-456a-b537-34836da2cdff\") " pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.034817 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.540408 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-68bc7c4fc6-4mkmv" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.145:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.145:8443: connect: connection refused" Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.565369 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 20 20:09:49 crc kubenswrapper[4948]: W0120 20:09:49.572453 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8dc0455c_7835_456a_b537_34836da2cdff.slice/crio-8d40a3cbd8a3427e58ce8ae99a187fe45ab18c298c7005ffdf3c8a22b4d45061 WatchSource:0}: Error finding container 8d40a3cbd8a3427e58ce8ae99a187fe45ab18c298c7005ffdf3c8a22b4d45061: Status 404 returned error can't find the container with id 8d40a3cbd8a3427e58ce8ae99a187fe45ab18c298c7005ffdf3c8a22b4d45061 Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.598977 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8dc0455c-7835-456a-b537-34836da2cdff","Type":"ContainerStarted","Data":"8d40a3cbd8a3427e58ce8ae99a187fe45ab18c298c7005ffdf3c8a22b4d45061"} Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.740168 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.744624 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 20 20:09:49 crc kubenswrapper[4948]: I0120 20:09:49.752579 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 20 20:09:50 crc kubenswrapper[4948]: I0120 20:09:50.583419 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e" path="/var/lib/kubelet/pods/12db55ff-dd59-497e-b5b0-ef3a5d0f8c1e/volumes" Jan 20 20:09:50 crc kubenswrapper[4948]: I0120 20:09:50.609507 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"8dc0455c-7835-456a-b537-34836da2cdff","Type":"ContainerStarted","Data":"decaab3b0b1f4966b45289b85612e67356f2e366f76fadbf4670e4e2815edcbc"} Jan 20 20:09:50 crc kubenswrapper[4948]: I0120 20:09:50.628657 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.628632461 podStartE2EDuration="2.628632461s" podCreationTimestamp="2026-01-20 20:09:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:50.627318724 +0000 UTC m=+1218.578043693" watchObservedRunningTime="2026-01-20 20:09:50.628632461 +0000 UTC m=+1218.579357430" Jan 20 20:09:50 crc kubenswrapper[4948]: I0120 20:09:50.632815 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 20 20:09:51 crc kubenswrapper[4948]: I0120 20:09:51.832165 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 20 20:09:51 crc kubenswrapper[4948]: I0120 20:09:51.832935 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack/nova-api-0" Jan 20 20:09:51 crc kubenswrapper[4948]: I0120 20:09:51.836083 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 20 20:09:51 crc kubenswrapper[4948]: I0120 20:09:51.838211 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 20 20:09:52 crc kubenswrapper[4948]: I0120 20:09:52.666651 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 20 20:09:52 crc kubenswrapper[4948]: I0120 20:09:52.715191 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.043856 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-zk22b"] Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.047895 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.074348 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-zk22b"] Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.208616 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.208688 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.208883 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.208938 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmngg\" (UniqueName: \"kubernetes.io/projected/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-kube-api-access-rmngg\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.208976 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-config\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.209086 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: 
\"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.311413 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmngg\" (UniqueName: \"kubernetes.io/projected/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-kube-api-access-rmngg\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.311462 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-config\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.311488 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.311549 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.311597 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.311691 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.312743 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.312755 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.312778 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-config\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: 
I0120 20:09:53.315034 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.315104 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.345489 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmngg\" (UniqueName: \"kubernetes.io/projected/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-kube-api-access-rmngg\") pod \"dnsmasq-dns-89c5cd4d5-zk22b\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.386865 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:09:53 crc kubenswrapper[4948]: W0120 20:09:53.953208 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5219f6f2_82bd_4f53_8f8c_be82ae5acbc3.slice/crio-ba182ea099880231c785fee90ea789b34d6c3a16d26ae029f1b91f111582ab53 WatchSource:0}: Error finding container ba182ea099880231c785fee90ea789b34d6c3a16d26ae029f1b91f111582ab53: Status 404 returned error can't find the container with id ba182ea099880231c785fee90ea789b34d6c3a16d26ae029f1b91f111582ab53 Jan 20 20:09:53 crc kubenswrapper[4948]: I0120 20:09:53.957382 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-zk22b"] Jan 20 20:09:54 crc kubenswrapper[4948]: I0120 20:09:54.076025 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:54 crc kubenswrapper[4948]: I0120 20:09:54.692027 4948 generic.go:334] "Generic (PLEG): container finished" podID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerID="75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447" exitCode=0 Jan 20 20:09:54 crc kubenswrapper[4948]: I0120 20:09:54.694092 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" event={"ID":"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3","Type":"ContainerDied","Data":"75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447"} Jan 20 20:09:54 crc kubenswrapper[4948]: I0120 20:09:54.694134 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" event={"ID":"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3","Type":"ContainerStarted","Data":"ba182ea099880231c785fee90ea789b34d6c3a16d26ae029f1b91f111582ab53"} Jan 20 20:09:55 crc kubenswrapper[4948]: I0120 20:09:55.717812 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" event={"ID":"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3","Type":"ContainerStarted","Data":"829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85"} Jan 20 20:09:55 crc kubenswrapper[4948]: I0120 20:09:55.719349 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 
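The VerifyControllerAttachedVolume / MountVolume.SetUp pairs above correspond to ConfigMap-backed volumes declared on the dnsmasq pod spec. A sketch of how such a volume is declared with the client-go types follows; the volume name comes from the log, while the ConfigMap name and mount path are assumptions, not the operator's actual manifest:

    // volume_sketch.go - declaring a ConfigMap-backed volume like the
    // "config" volume in the reconciler entries; a sketch only.
    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        vol := corev1.Volume{
            Name: "config", // volume name as seen in the log
            VolumeSource: corev1.VolumeSource{
                ConfigMap: &corev1.ConfigMapVolumeSource{
                    // ConfigMap name is an assumption; the log shows only the volume name.
                    LocalObjectReference: corev1.LocalObjectReference{Name: "dnsmasq-dns"},
                },
            },
        }
        mnt := corev1.VolumeMount{Name: "config", MountPath: "/etc/dnsmasq.d"} // path assumed
        fmt.Println(vol.Name, "->", mnt.MountPath)
    }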
Jan 20 20:09:55 crc kubenswrapper[4948]: I0120 20:09:55.729339 4948 generic.go:334] "Generic (PLEG): container finished" podID="af522f17-3cad-4004-b112-51e47fa9fea7" containerID="6adfd927e96ecfa6c7b6a841fa85196a4b50ebb518e1b96beb40195708ccb40c" exitCode=137
Jan 20 20:09:55 crc kubenswrapper[4948]: I0120 20:09:55.729399 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerDied","Data":"6adfd927e96ecfa6c7b6a841fa85196a4b50ebb518e1b96beb40195708ccb40c"}
Jan 20 20:09:55 crc kubenswrapper[4948]: I0120 20:09:55.760556 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" podStartSLOduration=2.760520749 podStartE2EDuration="2.760520749s" podCreationTimestamp="2026-01-20 20:09:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:09:55.750164466 +0000 UTC m=+1223.700889435" watchObservedRunningTime="2026-01-20 20:09:55.760520749 +0000 UTC m=+1223.711245718"
Jan 20 20:09:55 crc kubenswrapper[4948]: I0120 20:09:55.950322 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:09:55 crc kubenswrapper[4948]: I0120 20:09:55.950845 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-log" containerID="cri-o://b1777c8467c06fbf2471ef17f23fcfdf748713bea3c1d5b3f2ba19fd9f77e069" gracePeriod=30
Jan 20 20:09:55 crc kubenswrapper[4948]: I0120 20:09:55.951284 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-api" containerID="cri-o://caab64a9544c3bb514fc5a62e6790c478903737c9e26b4e30d27670462ff8f91" gracePeriod=30
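The "SyncLoop DELETE" followed by "Killing container with a grace period ... gracePeriod=30" entries are the API-initiated deletion path. A sketch of issuing such a delete with client-go follows; namespace, pod name, and the 30s grace period come from the log, and the clientset wiring is assumed:

    // delete_sketch.go - pod deletion with an explicit grace period,
    // mirroring gracePeriod=30 above; clientset construction not shown.
    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    func deleteWithGrace(cs kubernetes.Interface) error {
        grace := int64(30) // matches the gracePeriod logged by the kubelet
        return cs.CoreV1().Pods("openstack").Delete(
            context.TODO(),
            "nova-api-0",
            metav1.DeleteOptions{GracePeriodSeconds: &grace},
        )
    }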
Need to start a new one" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.431698 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjmfr\" (UniqueName: \"kubernetes.io/projected/af522f17-3cad-4004-b112-51e47fa9fea7-kube-api-access-wjmfr\") pod \"af522f17-3cad-4004-b112-51e47fa9fea7\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.431888 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-config-data\") pod \"af522f17-3cad-4004-b112-51e47fa9fea7\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.431939 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-tls-certs\") pod \"af522f17-3cad-4004-b112-51e47fa9fea7\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.431967 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-secret-key\") pod \"af522f17-3cad-4004-b112-51e47fa9fea7\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.431986 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-scripts\") pod \"af522f17-3cad-4004-b112-51e47fa9fea7\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.432060 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-combined-ca-bundle\") pod \"af522f17-3cad-4004-b112-51e47fa9fea7\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.432091 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af522f17-3cad-4004-b112-51e47fa9fea7-logs\") pod \"af522f17-3cad-4004-b112-51e47fa9fea7\" (UID: \"af522f17-3cad-4004-b112-51e47fa9fea7\") " Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.433064 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af522f17-3cad-4004-b112-51e47fa9fea7-logs" (OuterVolumeSpecName: "logs") pod "af522f17-3cad-4004-b112-51e47fa9fea7" (UID: "af522f17-3cad-4004-b112-51e47fa9fea7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.437729 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af522f17-3cad-4004-b112-51e47fa9fea7-kube-api-access-wjmfr" (OuterVolumeSpecName: "kube-api-access-wjmfr") pod "af522f17-3cad-4004-b112-51e47fa9fea7" (UID: "af522f17-3cad-4004-b112-51e47fa9fea7"). InnerVolumeSpecName "kube-api-access-wjmfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.446092 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "af522f17-3cad-4004-b112-51e47fa9fea7" (UID: "af522f17-3cad-4004-b112-51e47fa9fea7"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.485548 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-config-data" (OuterVolumeSpecName: "config-data") pod "af522f17-3cad-4004-b112-51e47fa9fea7" (UID: "af522f17-3cad-4004-b112-51e47fa9fea7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.488004 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "af522f17-3cad-4004-b112-51e47fa9fea7" (UID: "af522f17-3cad-4004-b112-51e47fa9fea7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.503492 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-scripts" (OuterVolumeSpecName: "scripts") pod "af522f17-3cad-4004-b112-51e47fa9fea7" (UID: "af522f17-3cad-4004-b112-51e47fa9fea7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.514908 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "af522f17-3cad-4004-b112-51e47fa9fea7" (UID: "af522f17-3cad-4004-b112-51e47fa9fea7"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.534649 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjmfr\" (UniqueName: \"kubernetes.io/projected/af522f17-3cad-4004-b112-51e47fa9fea7-kube-api-access-wjmfr\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.534681 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.534691 4948 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.534714 4948 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.534724 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/af522f17-3cad-4004-b112-51e47fa9fea7-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.534734 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af522f17-3cad-4004-b112-51e47fa9fea7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.534742 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/af522f17-3cad-4004-b112-51e47fa9fea7-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.730955 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.731292 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="proxy-httpd" containerID="cri-o://71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa" gracePeriod=30 Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.731393 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="sg-core" containerID="cri-o://af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e" gracePeriod=30 Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.731426 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="ceilometer-notification-agent" containerID="cri-o://7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928" gracePeriod=30 Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.731261 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="ceilometer-central-agent" containerID="cri-o://f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a" gracePeriod=30 Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.744187 4948 generic.go:334] 
"Generic (PLEG): container finished" podID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerID="b1777c8467c06fbf2471ef17f23fcfdf748713bea3c1d5b3f2ba19fd9f77e069" exitCode=143 Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.744221 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0c55a62b-8726-4451-bb36-ff327f6f5700","Type":"ContainerDied","Data":"b1777c8467c06fbf2471ef17f23fcfdf748713bea3c1d5b3f2ba19fd9f77e069"} Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.746005 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-68bc7c4fc6-4mkmv" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.746049 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-68bc7c4fc6-4mkmv" event={"ID":"af522f17-3cad-4004-b112-51e47fa9fea7","Type":"ContainerDied","Data":"d06b8f94f0291b54cfb083803fd5b146b483e1fab43f2786bc947a6f421aca66"} Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.746127 4948 scope.go:117] "RemoveContainer" containerID="eb250b4b5dbae1e0a758f7d341fc5c9464138bb0ec515d14abc4b1571a5d19f5" Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.775331 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-68bc7c4fc6-4mkmv"] Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.795365 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-68bc7c4fc6-4mkmv"] Jan 20 20:09:56 crc kubenswrapper[4948]: I0120 20:09:56.925638 4948 scope.go:117] "RemoveContainer" containerID="6adfd927e96ecfa6c7b6a841fa85196a4b50ebb518e1b96beb40195708ccb40c" Jan 20 20:09:57 crc kubenswrapper[4948]: I0120 20:09:57.760412 4948 generic.go:334] "Generic (PLEG): container finished" podID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerID="71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa" exitCode=0 Jan 20 20:09:57 crc kubenswrapper[4948]: I0120 20:09:57.760806 4948 generic.go:334] "Generic (PLEG): container finished" podID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerID="af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e" exitCode=2 Jan 20 20:09:57 crc kubenswrapper[4948]: I0120 20:09:57.760821 4948 generic.go:334] "Generic (PLEG): container finished" podID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerID="f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a" exitCode=0 Jan 20 20:09:57 crc kubenswrapper[4948]: I0120 20:09:57.760845 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerDied","Data":"71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa"} Jan 20 20:09:57 crc kubenswrapper[4948]: I0120 20:09:57.760878 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerDied","Data":"af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e"} Jan 20 20:09:57 crc kubenswrapper[4948]: I0120 20:09:57.760894 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerDied","Data":"f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a"} Jan 20 20:09:58 crc kubenswrapper[4948]: I0120 20:09:58.602253 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" path="/var/lib/kubelet/pods/af522f17-3cad-4004-b112-51e47fa9fea7/volumes" Jan 20 
20:09:59 crc kubenswrapper[4948]: I0120 20:09:59.036347 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:59 crc kubenswrapper[4948]: I0120 20:09:59.060848 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:09:59 crc kubenswrapper[4948]: I0120 20:09:59.780049 4948 generic.go:334] "Generic (PLEG): container finished" podID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerID="caab64a9544c3bb514fc5a62e6790c478903737c9e26b4e30d27670462ff8f91" exitCode=0 Jan 20 20:09:59 crc kubenswrapper[4948]: I0120 20:09:59.780227 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0c55a62b-8726-4451-bb36-ff327f6f5700","Type":"ContainerDied","Data":"caab64a9544c3bb514fc5a62e6790c478903737c9e26b4e30d27670462ff8f91"} Jan 20 20:09:59 crc kubenswrapper[4948]: I0120 20:09:59.866305 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.131076 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-gfmgp"] Jan 20 20:10:00 crc kubenswrapper[4948]: E0120 20:10:00.131700 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.131734 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" Jan 20 20:10:00 crc kubenswrapper[4948]: E0120 20:10:00.131748 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.131756 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" Jan 20 20:10:00 crc kubenswrapper[4948]: E0120 20:10:00.131768 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon-log" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.131778 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon-log" Jan 20 20:10:00 crc kubenswrapper[4948]: E0120 20:10:00.131801 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.131809 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.132056 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.132073 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon-log" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.132088 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.132100 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="af522f17-3cad-4004-b112-51e47fa9fea7" containerName="horizon" 
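The exitCode values in the PLEG entries follow the Unix 128+N convention: 137 is 128+9 (SIGKILL, the horizon container killed after its grace period) and 143 is 128+15 (SIGTERM, nova-api-log stopping on the termination signal). A tiny helper illustrating the arithmetic, assuming the Unix convention:

    // exitcode_sketch.go - decode the 128+N convention behind
    // exitCode=137 (SIGKILL) and exitCode=143 (SIGTERM) above.
    package main

    import (
        "fmt"
        "syscall"
    )

    func signalOf(exitCode int) (syscall.Signal, bool) {
        if exitCode > 128 && exitCode < 160 {
            return syscall.Signal(exitCode - 128), true
        }
        return 0, false
    }

    func main() {
        for _, c := range []int{0, 137, 143} {
            if sig, ok := signalOf(c); ok {
                fmt.Printf("exitCode=%d -> signal %d (%v)\n", c, int(sig), sig)
            } else {
                fmt.Printf("exitCode=%d -> normal exit\n", c)
            }
        }
    }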
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.133389 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.137162 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.138560 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.156139 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gfmgp"]
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.171952 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.172124 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-config-data\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.172209 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgm7p\" (UniqueName: \"kubernetes.io/projected/5d2feaec-203c-425a-86bf-c7681f07bafd-kube-api-access-lgm7p\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.172249 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-scripts\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.223754 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.275646 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgm7p\" (UniqueName: \"kubernetes.io/projected/5d2feaec-203c-425a-86bf-c7681f07bafd-kube-api-access-lgm7p\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.275818 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-scripts\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.276040 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.276130 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-config-data\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.305362 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.307311 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-scripts\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.309715 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-config-data\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.311936 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgm7p\" (UniqueName: \"kubernetes.io/projected/5d2feaec-203c-425a-86bf-c7681f07bafd-kube-api-access-lgm7p\") pod \"nova-cell1-cell-mapping-gfmgp\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.382277 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hztvc\" (UniqueName: \"kubernetes.io/projected/0c55a62b-8726-4451-bb36-ff327f6f5700-kube-api-access-hztvc\") pod \"0c55a62b-8726-4451-bb36-ff327f6f5700\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") "
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.382362 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-config-data\") pod \"0c55a62b-8726-4451-bb36-ff327f6f5700\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") "
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.382445 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c55a62b-8726-4451-bb36-ff327f6f5700-logs\") pod \"0c55a62b-8726-4451-bb36-ff327f6f5700\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") "
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.382515 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-combined-ca-bundle\") pod \"0c55a62b-8726-4451-bb36-ff327f6f5700\" (UID: \"0c55a62b-8726-4451-bb36-ff327f6f5700\") "
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.385181 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c55a62b-8726-4451-bb36-ff327f6f5700-logs" (OuterVolumeSpecName: "logs") pod "0c55a62b-8726-4451-bb36-ff327f6f5700" (UID: "0c55a62b-8726-4451-bb36-ff327f6f5700"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.399228 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c55a62b-8726-4451-bb36-ff327f6f5700-kube-api-access-hztvc" (OuterVolumeSpecName: "kube-api-access-hztvc") pod "0c55a62b-8726-4451-bb36-ff327f6f5700" (UID: "0c55a62b-8726-4451-bb36-ff327f6f5700"). InnerVolumeSpecName "kube-api-access-hztvc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.437153 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c55a62b-8726-4451-bb36-ff327f6f5700" (UID: "0c55a62b-8726-4451-bb36-ff327f6f5700"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.457098 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-config-data" (OuterVolumeSpecName: "config-data") pod "0c55a62b-8726-4451-bb36-ff327f6f5700" (UID: "0c55a62b-8726-4451-bb36-ff327f6f5700"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.484384 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hztvc\" (UniqueName: \"kubernetes.io/projected/0c55a62b-8726-4451-bb36-ff327f6f5700-kube-api-access-hztvc\") on node \"crc\" DevicePath \"\""
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.484424 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.484434 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c55a62b-8726-4451-bb36-ff327f6f5700-logs\") on node \"crc\" DevicePath \"\""
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.484447 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c55a62b-8726-4451-bb36-ff327f6f5700-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.551200 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gfmgp"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.805584 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.806293 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0c55a62b-8726-4451-bb36-ff327f6f5700","Type":"ContainerDied","Data":"9d4ac6a00f05cd598824d713457092aae58305606b81ba38b43a4dd90f208232"}
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.806388 4948 scope.go:117] "RemoveContainer" containerID="caab64a9544c3bb514fc5a62e6790c478903737c9e26b4e30d27670462ff8f91"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.852241 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.865193 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.886890 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 20 20:10:00 crc kubenswrapper[4948]: E0120 20:10:00.887368 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-log"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.887389 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-log"
Jan 20 20:10:00 crc kubenswrapper[4948]: E0120 20:10:00.887421 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-api"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.887427 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-api"
Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.888309 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" containerName="nova-api-log"
containerName="nova-api-api" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.889809 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.897205 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.897483 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.897621 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.898493 4948 scope.go:117] "RemoveContainer" containerID="b1777c8467c06fbf2471ef17f23fcfdf748713bea3c1d5b3f2ba19fd9f77e069" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.902998 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.993952 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-public-tls-certs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.994118 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-internal-tls-certs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.994153 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0eaf22-41f0-4b2f-b93e-36715d9e8499-logs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.994218 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-config-data\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.994286 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:00 crc kubenswrapper[4948]: I0120 20:10:00.994364 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjtwx\" (UniqueName: \"kubernetes.io/projected/da0eaf22-41f0-4b2f-b93e-36715d9e8499-kube-api-access-hjtwx\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.024847 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gfmgp"] Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.097345 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-internal-tls-certs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.097672 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0eaf22-41f0-4b2f-b93e-36715d9e8499-logs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.097830 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-config-data\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.097897 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.097946 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjtwx\" (UniqueName: \"kubernetes.io/projected/da0eaf22-41f0-4b2f-b93e-36715d9e8499-kube-api-access-hjtwx\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.098022 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-public-tls-certs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.098448 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0eaf22-41f0-4b2f-b93e-36715d9e8499-logs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.101914 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-internal-tls-certs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.103043 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-public-tls-certs\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.105686 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-config-data\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.110055 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.119157 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjtwx\" (UniqueName: \"kubernetes.io/projected/da0eaf22-41f0-4b2f-b93e-36715d9e8499-kube-api-access-hjtwx\") pod \"nova-api-0\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.233893 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.772820 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.820228 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da0eaf22-41f0-4b2f-b93e-36715d9e8499","Type":"ContainerStarted","Data":"65d064e4d0c8dfa1ffe68c516f261565718e50e0878e2acd6ef0ad7f9b6873c8"} Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.823618 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gfmgp" event={"ID":"5d2feaec-203c-425a-86bf-c7681f07bafd","Type":"ContainerStarted","Data":"8cc835529b854c5ab517f1ba92dede45b691a9de124e026a24407c65d2235fc2"} Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.823686 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gfmgp" event={"ID":"5d2feaec-203c-425a-86bf-c7681f07bafd","Type":"ContainerStarted","Data":"2209e0cedb9332277d82b217cedf3970356e0059ce306d6c272c11bf3f0af5ca"} Jan 20 20:10:01 crc kubenswrapper[4948]: I0120 20:10:01.856542 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-gfmgp" podStartSLOduration=1.85649337 podStartE2EDuration="1.85649337s" podCreationTimestamp="2026-01-20 20:10:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:10:01.847556377 +0000 UTC m=+1229.798281356" watchObservedRunningTime="2026-01-20 20:10:01.85649337 +0000 UTC m=+1229.807218339" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.584101 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c55a62b-8726-4451-bb36-ff327f6f5700" path="/var/lib/kubelet/pods/0c55a62b-8726-4451-bb36-ff327f6f5700/volumes" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.596130 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.669309 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-scripts\") pod \"498c1699-0031-4363-8686-5f5cdf52c7b2\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.669395 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zv22x\" (UniqueName: \"kubernetes.io/projected/498c1699-0031-4363-8686-5f5cdf52c7b2-kube-api-access-zv22x\") pod \"498c1699-0031-4363-8686-5f5cdf52c7b2\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.669431 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-ceilometer-tls-certs\") pod \"498c1699-0031-4363-8686-5f5cdf52c7b2\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.669486 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-log-httpd\") pod \"498c1699-0031-4363-8686-5f5cdf52c7b2\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.669558 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-combined-ca-bundle\") pod \"498c1699-0031-4363-8686-5f5cdf52c7b2\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.669595 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-sg-core-conf-yaml\") pod \"498c1699-0031-4363-8686-5f5cdf52c7b2\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.669609 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-config-data\") pod \"498c1699-0031-4363-8686-5f5cdf52c7b2\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.669649 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-run-httpd\") pod \"498c1699-0031-4363-8686-5f5cdf52c7b2\" (UID: \"498c1699-0031-4363-8686-5f5cdf52c7b2\") " Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.670491 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "498c1699-0031-4363-8686-5f5cdf52c7b2" (UID: "498c1699-0031-4363-8686-5f5cdf52c7b2"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.671265 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "498c1699-0031-4363-8686-5f5cdf52c7b2" (UID: "498c1699-0031-4363-8686-5f5cdf52c7b2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.693992 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/498c1699-0031-4363-8686-5f5cdf52c7b2-kube-api-access-zv22x" (OuterVolumeSpecName: "kube-api-access-zv22x") pod "498c1699-0031-4363-8686-5f5cdf52c7b2" (UID: "498c1699-0031-4363-8686-5f5cdf52c7b2"). InnerVolumeSpecName "kube-api-access-zv22x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.702895 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-scripts" (OuterVolumeSpecName: "scripts") pod "498c1699-0031-4363-8686-5f5cdf52c7b2" (UID: "498c1699-0031-4363-8686-5f5cdf52c7b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.779822 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zv22x\" (UniqueName: \"kubernetes.io/projected/498c1699-0031-4363-8686-5f5cdf52c7b2-kube-api-access-zv22x\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.779855 4948 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.779865 4948 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/498c1699-0031-4363-8686-5f5cdf52c7b2-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.779874 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.802262 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "498c1699-0031-4363-8686-5f5cdf52c7b2" (UID: "498c1699-0031-4363-8686-5f5cdf52c7b2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.886447 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "498c1699-0031-4363-8686-5f5cdf52c7b2" (UID: "498c1699-0031-4363-8686-5f5cdf52c7b2"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.887675 4948 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.887694 4948 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.926601 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da0eaf22-41f0-4b2f-b93e-36715d9e8499","Type":"ContainerStarted","Data":"c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1"} Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.926644 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da0eaf22-41f0-4b2f-b93e-36715d9e8499","Type":"ContainerStarted","Data":"df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea"} Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.961851 4948 generic.go:334] "Generic (PLEG): container finished" podID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerID="7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928" exitCode=0 Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.962957 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.963154 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerDied","Data":"7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928"} Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.963179 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"498c1699-0031-4363-8686-5f5cdf52c7b2","Type":"ContainerDied","Data":"525ab86992bfd492625ac50eb3b105a4a01757016fcd82d1d0deee3dba13c2c8"} Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.963195 4948 scope.go:117] "RemoveContainer" containerID="71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa" Jan 20 20:10:02 crc kubenswrapper[4948]: I0120 20:10:02.967930 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.967912008 podStartE2EDuration="2.967912008s" podCreationTimestamp="2026-01-20 20:10:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:10:02.963871973 +0000 UTC m=+1230.914596942" watchObservedRunningTime="2026-01-20 20:10:02.967912008 +0000 UTC m=+1230.918636977" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.016071 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "498c1699-0031-4363-8686-5f5cdf52c7b2" (UID: "498c1699-0031-4363-8686-5f5cdf52c7b2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.033596 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-config-data" (OuterVolumeSpecName: "config-data") pod "498c1699-0031-4363-8686-5f5cdf52c7b2" (UID: "498c1699-0031-4363-8686-5f5cdf52c7b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.042352 4948 scope.go:117] "RemoveContainer" containerID="af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.081936 4948 scope.go:117] "RemoveContainer" containerID="7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.112424 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.112453 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/498c1699-0031-4363-8686-5f5cdf52c7b2-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.113570 4948 scope.go:117] "RemoveContainer" containerID="f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.140987 4948 scope.go:117] "RemoveContainer" containerID="71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa" Jan 20 20:10:03 crc kubenswrapper[4948]: E0120 20:10:03.141510 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa\": container with ID starting with 71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa not found: ID does not exist" containerID="71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.141545 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa"} err="failed to get container status \"71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa\": rpc error: code = NotFound desc = could not find container \"71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa\": container with ID starting with 71269fdddc18c13f0e591753fc4d76c51a376af810b8188e329bfab295a97afa not found: ID does not exist" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.141567 4948 scope.go:117] "RemoveContainer" containerID="af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e" Jan 20 20:10:03 crc kubenswrapper[4948]: E0120 20:10:03.152321 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e\": container with ID starting with af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e not found: ID does not exist" containerID="af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.152385 4948 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e"} err="failed to get container status \"af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e\": rpc error: code = NotFound desc = could not find container \"af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e\": container with ID starting with af15ec2683a453ae7c359337e06176ad45c44034a625cf2eca790aa669ad237e not found: ID does not exist" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.152417 4948 scope.go:117] "RemoveContainer" containerID="7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928" Jan 20 20:10:03 crc kubenswrapper[4948]: E0120 20:10:03.153125 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928\": container with ID starting with 7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928 not found: ID does not exist" containerID="7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.153155 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928"} err="failed to get container status \"7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928\": rpc error: code = NotFound desc = could not find container \"7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928\": container with ID starting with 7d744ed52b7ae7eb7df0a7de9d4ab6a36afc057396e4f4c7bed7a58f1e9f2928 not found: ID does not exist" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.153171 4948 scope.go:117] "RemoveContainer" containerID="f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a" Jan 20 20:10:03 crc kubenswrapper[4948]: E0120 20:10:03.153892 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a\": container with ID starting with f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a not found: ID does not exist" containerID="f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.153923 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a"} err="failed to get container status \"f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a\": rpc error: code = NotFound desc = could not find container \"f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a\": container with ID starting with f4ab6330e307fbb2d99c8e8ecbf57669d832ed9d1fe156a6fdcf58eab1056d9a not found: ID does not exist" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.303154 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.315115 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.338172 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:10:03 crc kubenswrapper[4948]: E0120 20:10:03.339031 4948 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="ceilometer-central-agent" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.339151 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="ceilometer-central-agent" Jan 20 20:10:03 crc kubenswrapper[4948]: E0120 20:10:03.339263 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="sg-core" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.339346 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="sg-core" Jan 20 20:10:03 crc kubenswrapper[4948]: E0120 20:10:03.339424 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="ceilometer-notification-agent" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.339496 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="ceilometer-notification-agent" Jan 20 20:10:03 crc kubenswrapper[4948]: E0120 20:10:03.339575 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="proxy-httpd" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.339642 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="proxy-httpd" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.339984 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="proxy-httpd" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.340090 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="ceilometer-notification-agent" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.340182 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="sg-core" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.340279 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" containerName="ceilometer-central-agent" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.342281 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.345864 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.346134 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.356335 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.370469 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.389222 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.418504 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad8829d7-3d58-4752-9f62-83663e2dad23-log-httpd\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.418578 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.418621 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-config-data\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.418674 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-scripts\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.422425 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.422495 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.422535 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd5b7\" (UniqueName: \"kubernetes.io/projected/ad8829d7-3d58-4752-9f62-83663e2dad23-kube-api-access-qd5b7\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: 
I0120 20:10:03.422684 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad8829d7-3d58-4752-9f62-83663e2dad23-run-httpd\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.492151 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-bqnkw"] Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.492498 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" podUID="11a46772-3366-44ee-9479-0be0f0cfaca4" containerName="dnsmasq-dns" containerID="cri-o://3d2b3ec4bf9c08452de9b8063c585585547d4154a21b1e338665fd069b6d739f" gracePeriod=10 Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.529890 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad8829d7-3d58-4752-9f62-83663e2dad23-run-httpd\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.530025 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad8829d7-3d58-4752-9f62-83663e2dad23-log-httpd\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.530058 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.530091 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-config-data\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.530153 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-scripts\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.530194 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.530224 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.530266 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd5b7\" (UniqueName: 
\"kubernetes.io/projected/ad8829d7-3d58-4752-9f62-83663e2dad23-kube-api-access-qd5b7\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.532154 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad8829d7-3d58-4752-9f62-83663e2dad23-run-httpd\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.532800 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ad8829d7-3d58-4752-9f62-83663e2dad23-log-httpd\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.550818 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.551776 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.552162 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-config-data\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.565579 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-scripts\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.575219 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd5b7\" (UniqueName: \"kubernetes.io/projected/ad8829d7-3d58-4752-9f62-83663e2dad23-kube-api-access-qd5b7\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.575873 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad8829d7-3d58-4752-9f62-83663e2dad23-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ad8829d7-3d58-4752-9f62-83663e2dad23\") " pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.669267 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.979147 4948 generic.go:334] "Generic (PLEG): container finished" podID="11a46772-3366-44ee-9479-0be0f0cfaca4" containerID="3d2b3ec4bf9c08452de9b8063c585585547d4154a21b1e338665fd069b6d739f" exitCode=0 Jan 20 20:10:03 crc kubenswrapper[4948]: I0120 20:10:03.980584 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" event={"ID":"11a46772-3366-44ee-9479-0be0f0cfaca4","Type":"ContainerDied","Data":"3d2b3ec4bf9c08452de9b8063c585585547d4154a21b1e338665fd069b6d739f"} Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.197743 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.251642 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-swift-storage-0\") pod \"11a46772-3366-44ee-9479-0be0f0cfaca4\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.251738 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-svc\") pod \"11a46772-3366-44ee-9479-0be0f0cfaca4\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.251812 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-config\") pod \"11a46772-3366-44ee-9479-0be0f0cfaca4\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.251858 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmbhx\" (UniqueName: \"kubernetes.io/projected/11a46772-3366-44ee-9479-0be0f0cfaca4-kube-api-access-mmbhx\") pod \"11a46772-3366-44ee-9479-0be0f0cfaca4\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.251910 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-sb\") pod \"11a46772-3366-44ee-9479-0be0f0cfaca4\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.252001 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-nb\") pod \"11a46772-3366-44ee-9479-0be0f0cfaca4\" (UID: \"11a46772-3366-44ee-9479-0be0f0cfaca4\") " Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.315649 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11a46772-3366-44ee-9479-0be0f0cfaca4-kube-api-access-mmbhx" (OuterVolumeSpecName: "kube-api-access-mmbhx") pod "11a46772-3366-44ee-9479-0be0f0cfaca4" (UID: "11a46772-3366-44ee-9479-0be0f0cfaca4"). InnerVolumeSpecName "kube-api-access-mmbhx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.346808 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "11a46772-3366-44ee-9479-0be0f0cfaca4" (UID: "11a46772-3366-44ee-9479-0be0f0cfaca4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.356685 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmbhx\" (UniqueName: \"kubernetes.io/projected/11a46772-3366-44ee-9479-0be0f0cfaca4-kube-api-access-mmbhx\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.356725 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.427281 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-config" (OuterVolumeSpecName: "config") pod "11a46772-3366-44ee-9479-0be0f0cfaca4" (UID: "11a46772-3366-44ee-9479-0be0f0cfaca4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.443271 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "11a46772-3366-44ee-9479-0be0f0cfaca4" (UID: "11a46772-3366-44ee-9479-0be0f0cfaca4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.452082 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "11a46772-3366-44ee-9479-0be0f0cfaca4" (UID: "11a46772-3366-44ee-9479-0be0f0cfaca4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.458050 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.458224 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.458294 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.459625 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "11a46772-3366-44ee-9479-0be0f0cfaca4" (UID: "11a46772-3366-44ee-9479-0be0f0cfaca4"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.476846 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.560289 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/11a46772-3366-44ee-9479-0be0f0cfaca4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.590596 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="498c1699-0031-4363-8686-5f5cdf52c7b2" path="/var/lib/kubelet/pods/498c1699-0031-4363-8686-5f5cdf52c7b2/volumes" Jan 20 20:10:04 crc kubenswrapper[4948]: I0120 20:10:04.996903 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad8829d7-3d58-4752-9f62-83663e2dad23","Type":"ContainerStarted","Data":"8a56afa3f642e92d6e00049f7eb8fd99b6c672c3d3e08640d65a59437d747105"} Jan 20 20:10:05 crc kubenswrapper[4948]: I0120 20:10:05.002195 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" event={"ID":"11a46772-3366-44ee-9479-0be0f0cfaca4","Type":"ContainerDied","Data":"324310ec1665f2df4760454bb02b9c9ad421d8e50b6de8a7cf360d51d419814a"} Jan 20 20:10:05 crc kubenswrapper[4948]: I0120 20:10:05.002253 4948 scope.go:117] "RemoveContainer" containerID="3d2b3ec4bf9c08452de9b8063c585585547d4154a21b1e338665fd069b6d739f" Jan 20 20:10:05 crc kubenswrapper[4948]: I0120 20:10:05.002402 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-bqnkw" Jan 20 20:10:05 crc kubenswrapper[4948]: I0120 20:10:05.052889 4948 scope.go:117] "RemoveContainer" containerID="74a737bf5d82290a8810d5232c961e118d1224fef675fea127422df5490e61bf" Jan 20 20:10:05 crc kubenswrapper[4948]: I0120 20:10:05.059047 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-bqnkw"] Jan 20 20:10:05 crc kubenswrapper[4948]: I0120 20:10:05.079825 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-bqnkw"] Jan 20 20:10:06 crc kubenswrapper[4948]: I0120 20:10:06.012434 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad8829d7-3d58-4752-9f62-83663e2dad23","Type":"ContainerStarted","Data":"73c52fc201e4cb81742a039d33c38c09409332b31d355445fca1c4082ec32f71"} Jan 20 20:10:06 crc kubenswrapper[4948]: I0120 20:10:06.582421 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11a46772-3366-44ee-9479-0be0f0cfaca4" path="/var/lib/kubelet/pods/11a46772-3366-44ee-9479-0be0f0cfaca4/volumes" Jan 20 20:10:07 crc kubenswrapper[4948]: I0120 20:10:07.023393 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad8829d7-3d58-4752-9f62-83663e2dad23","Type":"ContainerStarted","Data":"95993ff278d645a5ae4de5f546aeec43399873ab4f156fb6f32b807f4c8e65e9"} Jan 20 20:10:08 crc kubenswrapper[4948]: I0120 20:10:08.035103 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad8829d7-3d58-4752-9f62-83663e2dad23","Type":"ContainerStarted","Data":"5099c727c36ba98676c96e09f37de7078697c0305fc2daa1cc54f8578f88b9d3"} Jan 20 20:10:09 crc kubenswrapper[4948]: I0120 20:10:09.047953 4948 generic.go:334] "Generic (PLEG): container finished" podID="5d2feaec-203c-425a-86bf-c7681f07bafd" 
containerID="8cc835529b854c5ab517f1ba92dede45b691a9de124e026a24407c65d2235fc2" exitCode=0 Jan 20 20:10:09 crc kubenswrapper[4948]: I0120 20:10:09.048063 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gfmgp" event={"ID":"5d2feaec-203c-425a-86bf-c7681f07bafd","Type":"ContainerDied","Data":"8cc835529b854c5ab517f1ba92dede45b691a9de124e026a24407c65d2235fc2"} Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.059612 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ad8829d7-3d58-4752-9f62-83663e2dad23","Type":"ContainerStarted","Data":"5398c6489381d70d6ef996fd7daafa236417e2b6f88ec1c0b19892deb63d90d1"} Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.220397 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.336502324 podStartE2EDuration="7.220373931s" podCreationTimestamp="2026-01-20 20:10:03 +0000 UTC" firstStartedPulling="2026-01-20 20:10:04.478498724 +0000 UTC m=+1232.429223693" lastFinishedPulling="2026-01-20 20:10:09.362370331 +0000 UTC m=+1237.313095300" observedRunningTime="2026-01-20 20:10:10.209052231 +0000 UTC m=+1238.159777200" watchObservedRunningTime="2026-01-20 20:10:10.220373931 +0000 UTC m=+1238.171098900" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.696183 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gfmgp" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.785776 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-config-data\") pod \"5d2feaec-203c-425a-86bf-c7681f07bafd\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.785844 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgm7p\" (UniqueName: \"kubernetes.io/projected/5d2feaec-203c-425a-86bf-c7681f07bafd-kube-api-access-lgm7p\") pod \"5d2feaec-203c-425a-86bf-c7681f07bafd\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.785914 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-combined-ca-bundle\") pod \"5d2feaec-203c-425a-86bf-c7681f07bafd\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.786654 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-scripts\") pod \"5d2feaec-203c-425a-86bf-c7681f07bafd\" (UID: \"5d2feaec-203c-425a-86bf-c7681f07bafd\") " Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.800055 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d2feaec-203c-425a-86bf-c7681f07bafd-kube-api-access-lgm7p" (OuterVolumeSpecName: "kube-api-access-lgm7p") pod "5d2feaec-203c-425a-86bf-c7681f07bafd" (UID: "5d2feaec-203c-425a-86bf-c7681f07bafd"). InnerVolumeSpecName "kube-api-access-lgm7p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.800092 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-scripts" (OuterVolumeSpecName: "scripts") pod "5d2feaec-203c-425a-86bf-c7681f07bafd" (UID: "5d2feaec-203c-425a-86bf-c7681f07bafd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.815866 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-config-data" (OuterVolumeSpecName: "config-data") pod "5d2feaec-203c-425a-86bf-c7681f07bafd" (UID: "5d2feaec-203c-425a-86bf-c7681f07bafd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.845597 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d2feaec-203c-425a-86bf-c7681f07bafd" (UID: "5d2feaec-203c-425a-86bf-c7681f07bafd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.888474 4948 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-scripts\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.888507 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.888518 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgm7p\" (UniqueName: \"kubernetes.io/projected/5d2feaec-203c-425a-86bf-c7681f07bafd-kube-api-access-lgm7p\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:10 crc kubenswrapper[4948]: I0120 20:10:10.888530 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2feaec-203c-425a-86bf-c7681f07bafd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.069753 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gfmgp" event={"ID":"5d2feaec-203c-425a-86bf-c7681f07bafd","Type":"ContainerDied","Data":"2209e0cedb9332277d82b217cedf3970356e0059ce306d6c272c11bf3f0af5ca"} Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.069810 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gfmgp" Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.069819 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2209e0cedb9332277d82b217cedf3970356e0059ce306d6c272c11bf3f0af5ca" Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.070102 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.234429 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.234497 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.354264 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.410182 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.410480 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-log" containerID="cri-o://ced74b77f9231f99559bcbf5acf84d152938805fd81a9a90bebb671870edbabb" gracePeriod=30 Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.411062 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-metadata" containerID="cri-o://214bbc05e6b10db32eae871db871075877e141ad6abb1fac63a3a9dc5ab0402a" gracePeriod=30 Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.422685 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:10:11 crc kubenswrapper[4948]: I0120 20:10:11.423057 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" containerName="nova-scheduler-scheduler" containerID="cri-o://53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b" gracePeriod=30 Jan 20 20:10:12 crc kubenswrapper[4948]: I0120 20:10:12.080576 4948 generic.go:334] "Generic (PLEG): container finished" podID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerID="ced74b77f9231f99559bcbf5acf84d152938805fd81a9a90bebb671870edbabb" exitCode=143 Jan 20 20:10:12 crc kubenswrapper[4948]: I0120 20:10:12.080653 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26","Type":"ContainerDied","Data":"ced74b77f9231f99559bcbf5acf84d152938805fd81a9a90bebb671870edbabb"} Jan 20 20:10:12 crc kubenswrapper[4948]: I0120 20:10:12.081006 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-log" containerID="cri-o://df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea" gracePeriod=30 Jan 20 20:10:12 crc kubenswrapper[4948]: I0120 20:10:12.081053 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-api" containerID="cri-o://c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1" gracePeriod=30 Jan 20 
20:10:12 crc kubenswrapper[4948]: I0120 20:10:12.100720 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": EOF" Jan 20 20:10:12 crc kubenswrapper[4948]: I0120 20:10:12.100974 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": EOF" Jan 20 20:10:13 crc kubenswrapper[4948]: I0120 20:10:13.091322 4948 generic.go:334] "Generic (PLEG): container finished" podID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerID="df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea" exitCode=143 Jan 20 20:10:13 crc kubenswrapper[4948]: I0120 20:10:13.091404 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da0eaf22-41f0-4b2f-b93e-36715d9e8499","Type":"ContainerDied","Data":"df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea"} Jan 20 20:10:14 crc kubenswrapper[4948]: I0120 20:10:14.837936 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": read tcp 10.217.0.2:60542->10.217.0.196:8775: read: connection reset by peer" Jan 20 20:10:14 crc kubenswrapper[4948]: I0120 20:10:14.838030 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": read tcp 10.217.0.2:60540->10.217.0.196:8775: read: connection reset by peer" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.114817 4948 generic.go:334] "Generic (PLEG): container finished" podID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerID="214bbc05e6b10db32eae871db871075877e141ad6abb1fac63a3a9dc5ab0402a" exitCode=0 Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.114871 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26","Type":"ContainerDied","Data":"214bbc05e6b10db32eae871db871075877e141ad6abb1fac63a3a9dc5ab0402a"} Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.353240 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.382393 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-config-data\") pod \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.382670 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-nova-metadata-tls-certs\") pod \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.382724 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqqc8\" (UniqueName: \"kubernetes.io/projected/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-kube-api-access-bqqc8\") pod \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.382758 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-combined-ca-bundle\") pod \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.382790 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-logs\") pod \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\" (UID: \"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26\") " Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.383664 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-logs" (OuterVolumeSpecName: "logs") pod "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" (UID: "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.388274 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-kube-api-access-bqqc8" (OuterVolumeSpecName: "kube-api-access-bqqc8") pod "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" (UID: "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26"). InnerVolumeSpecName "kube-api-access-bqqc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.460782 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" (UID: "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.485514 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqqc8\" (UniqueName: \"kubernetes.io/projected/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-kube-api-access-bqqc8\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.485555 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.485567 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.523453 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-config-data" (OuterVolumeSpecName: "config-data") pod "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" (UID: "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.582892 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" (UID: "824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.588048 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:15 crc kubenswrapper[4948]: I0120 20:10:15.588067 4948 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:15 crc kubenswrapper[4948]: E0120 20:10:15.750556 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b is running failed: container process not found" containerID="53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 20 20:10:15 crc kubenswrapper[4948]: E0120 20:10:15.750998 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b is running failed: container process not found" containerID="53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 20 20:10:15 crc kubenswrapper[4948]: E0120 20:10:15.751343 4948 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b is running failed: container process not found" 
containerID="53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 20 20:10:15 crc kubenswrapper[4948]: E0120 20:10:15.751458 4948 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" containerName="nova-scheduler-scheduler" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.046957 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.098806 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-config-data\") pod \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.098905 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flspp\" (UniqueName: \"kubernetes.io/projected/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-kube-api-access-flspp\") pod \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.098981 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-combined-ca-bundle\") pod \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\" (UID: \"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8\") " Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.106287 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-kube-api-access-flspp" (OuterVolumeSpecName: "kube-api-access-flspp") pod "6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" (UID: "6c6fe1bc-8f9f-4504-97cc-1ac4905634a8"). InnerVolumeSpecName "kube-api-access-flspp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.141752 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-config-data" (OuterVolumeSpecName: "config-data") pod "6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" (UID: "6c6fe1bc-8f9f-4504-97cc-1ac4905634a8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.157310 4948 generic.go:334] "Generic (PLEG): container finished" podID="6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" containerID="53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b" exitCode=0 Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.157456 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8","Type":"ContainerDied","Data":"53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b"} Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.157491 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6c6fe1bc-8f9f-4504-97cc-1ac4905634a8","Type":"ContainerDied","Data":"8af4ed67ea7b4e2e8156924b70d91a9309b84ffa86a6a8b6ef9426dd66a86b3a"} Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.157517 4948 scope.go:117] "RemoveContainer" containerID="53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.157920 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.160734 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26","Type":"ContainerDied","Data":"30d103d9618d84221f6b19798057b16165b7ace2193ce22cb2c466c273d5eed7"} Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.160889 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.188714 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" (UID: "6c6fe1bc-8f9f-4504-97cc-1ac4905634a8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.202134 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.202169 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flspp\" (UniqueName: \"kubernetes.io/projected/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-kube-api-access-flspp\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.202183 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.243291 4948 scope.go:117] "RemoveContainer" containerID="53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b" Jan 20 20:10:16 crc kubenswrapper[4948]: E0120 20:10:16.253831 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b\": container with ID starting with 53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b not found: ID does not exist" containerID="53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.253884 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b"} err="failed to get container status \"53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b\": rpc error: code = NotFound desc = could not find container \"53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b\": container with ID starting with 53b7bc16efe51b6ecad4b979afdfaeab20e1c2a925fed97be4d64839562dc65b not found: ID does not exist" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.253914 4948 scope.go:117] "RemoveContainer" containerID="214bbc05e6b10db32eae871db871075877e141ad6abb1fac63a3a9dc5ab0402a" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.271645 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.283755 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.308605 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:10:16 crc kubenswrapper[4948]: E0120 20:10:16.309249 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-log" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.309350 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-log" Jan 20 20:10:16 crc kubenswrapper[4948]: E0120 20:10:16.309435 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" containerName="nova-scheduler-scheduler" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.309490 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" containerName="nova-scheduler-scheduler" Jan 
20 20:10:16 crc kubenswrapper[4948]: E0120 20:10:16.309561 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11a46772-3366-44ee-9479-0be0f0cfaca4" containerName="init" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.309615 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="11a46772-3366-44ee-9479-0be0f0cfaca4" containerName="init" Jan 20 20:10:16 crc kubenswrapper[4948]: E0120 20:10:16.309674 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d2feaec-203c-425a-86bf-c7681f07bafd" containerName="nova-manage" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.309817 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d2feaec-203c-425a-86bf-c7681f07bafd" containerName="nova-manage" Jan 20 20:10:16 crc kubenswrapper[4948]: E0120 20:10:16.309880 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-metadata" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.309932 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-metadata" Jan 20 20:10:16 crc kubenswrapper[4948]: E0120 20:10:16.310000 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11a46772-3366-44ee-9479-0be0f0cfaca4" containerName="dnsmasq-dns" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.310058 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="11a46772-3366-44ee-9479-0be0f0cfaca4" containerName="dnsmasq-dns" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.310301 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d2feaec-203c-425a-86bf-c7681f07bafd" containerName="nova-manage" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.310378 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-log" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.310451 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="11a46772-3366-44ee-9479-0be0f0cfaca4" containerName="dnsmasq-dns" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.310520 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" containerName="nova-metadata-metadata" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.310581 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" containerName="nova-scheduler-scheduler" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.311657 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.322834 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.332021 4948 scope.go:117] "RemoveContainer" containerID="ced74b77f9231f99559bcbf5acf84d152938805fd81a9a90bebb671870edbabb" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.340255 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.340409 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.505011 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.506221 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/405260b6-bbf5-4d0b-8a81-686340252185-logs\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.506595 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-config-data\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.506770 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.506848 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.506907 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfrjr\" (UniqueName: \"kubernetes.io/projected/405260b6-bbf5-4d0b-8a81-686340252185-kube-api-access-qfrjr\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.514200 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.553457 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.555068 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.558902 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.601114 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c6fe1bc-8f9f-4504-97cc-1ac4905634a8" path="/var/lib/kubelet/pods/6c6fe1bc-8f9f-4504-97cc-1ac4905634a8/volumes" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.601897 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26" path="/var/lib/kubelet/pods/824bf5c9-bec4-4a65-a69f-6c3d0b7a1b26/volumes" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.602629 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.622804 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-config-data\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.622872 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d52d1e7-1dc7-4341-b483-da6863189804-config-data\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.622938 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.622976 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.623003 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfrjr\" (UniqueName: \"kubernetes.io/projected/405260b6-bbf5-4d0b-8a81-686340252185-kube-api-access-qfrjr\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.623036 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/405260b6-bbf5-4d0b-8a81-686340252185-logs\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.623090 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmwfs\" (UniqueName: \"kubernetes.io/projected/7d52d1e7-1dc7-4341-b483-da6863189804-kube-api-access-qmwfs\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.623125 4948 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d52d1e7-1dc7-4341-b483-da6863189804-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.624439 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/405260b6-bbf5-4d0b-8a81-686340252185-logs\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.638264 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-config-data\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.639047 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.640248 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/405260b6-bbf5-4d0b-8a81-686340252185-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.650747 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfrjr\" (UniqueName: \"kubernetes.io/projected/405260b6-bbf5-4d0b-8a81-686340252185-kube-api-access-qfrjr\") pod \"nova-metadata-0\" (UID: \"405260b6-bbf5-4d0b-8a81-686340252185\") " pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.662285 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.725401 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d52d1e7-1dc7-4341-b483-da6863189804-config-data\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.725853 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmwfs\" (UniqueName: \"kubernetes.io/projected/7d52d1e7-1dc7-4341-b483-da6863189804-kube-api-access-qmwfs\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.725905 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d52d1e7-1dc7-4341-b483-da6863189804-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.733626 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d52d1e7-1dc7-4341-b483-da6863189804-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.748326 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d52d1e7-1dc7-4341-b483-da6863189804-config-data\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.759366 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmwfs\" (UniqueName: \"kubernetes.io/projected/7d52d1e7-1dc7-4341-b483-da6863189804-kube-api-access-qmwfs\") pod \"nova-scheduler-0\" (UID: \"7d52d1e7-1dc7-4341-b483-da6863189804\") " pod="openstack/nova-scheduler-0" Jan 20 20:10:16 crc kubenswrapper[4948]: I0120 20:10:16.873593 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 20 20:10:17 crc kubenswrapper[4948]: I0120 20:10:17.228073 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 20 20:10:17 crc kubenswrapper[4948]: W0120 20:10:17.381234 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d52d1e7_1dc7_4341_b483_da6863189804.slice/crio-971c359c4e32f5de8a5b583a54641f7bb2bb0573768d3713d7e80b3badb33c6b WatchSource:0}: Error finding container 971c359c4e32f5de8a5b583a54641f7bb2bb0573768d3713d7e80b3badb33c6b: Status 404 returned error can't find the container with id 971c359c4e32f5de8a5b583a54641f7bb2bb0573768d3713d7e80b3badb33c6b Jan 20 20:10:17 crc kubenswrapper[4948]: I0120 20:10:17.383675 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 20 20:10:18 crc kubenswrapper[4948]: I0120 20:10:18.193232 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"405260b6-bbf5-4d0b-8a81-686340252185","Type":"ContainerStarted","Data":"5c371d172dd5e794f362c1161ab721c7f70f3a4853ea884084d448e79ddc6aa4"} Jan 20 20:10:18 crc kubenswrapper[4948]: I0120 20:10:18.193479 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"405260b6-bbf5-4d0b-8a81-686340252185","Type":"ContainerStarted","Data":"3f6e89e3234d5ef4e1dc8a3103afbac102d49320cb53d4495622ab8e798bff8a"} Jan 20 20:10:18 crc kubenswrapper[4948]: I0120 20:10:18.193491 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"405260b6-bbf5-4d0b-8a81-686340252185","Type":"ContainerStarted","Data":"c31af4e67fa0af1b3320d8bf9e1cb633b678e86328abc90a23f76826a5d609a7"} Jan 20 20:10:18 crc kubenswrapper[4948]: I0120 20:10:18.197629 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7d52d1e7-1dc7-4341-b483-da6863189804","Type":"ContainerStarted","Data":"bbd28298ad3675f00471caaa668f2cd5602a6020067fd29c90a3ff2740bb9711"} Jan 20 20:10:18 crc kubenswrapper[4948]: I0120 20:10:18.197658 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7d52d1e7-1dc7-4341-b483-da6863189804","Type":"ContainerStarted","Data":"971c359c4e32f5de8a5b583a54641f7bb2bb0573768d3713d7e80b3badb33c6b"} Jan 20 20:10:18 crc kubenswrapper[4948]: I0120 20:10:18.242994 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.242974449 podStartE2EDuration="2.242974449s" podCreationTimestamp="2026-01-20 20:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:10:18.241156128 +0000 UTC m=+1246.191881107" watchObservedRunningTime="2026-01-20 20:10:18.242974449 +0000 UTC m=+1246.193699418" Jan 20 20:10:18 crc kubenswrapper[4948]: I0120 20:10:18.260353 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.26033646 podStartE2EDuration="2.26033646s" podCreationTimestamp="2026-01-20 20:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:10:18.258173389 +0000 UTC m=+1246.208898368" watchObservedRunningTime="2026-01-20 20:10:18.26033646 +0000 UTC m=+1246.211061429" Jan 20 20:10:19 crc 
kubenswrapper[4948]: I0120 20:10:19.037758 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.174935 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0eaf22-41f0-4b2f-b93e-36715d9e8499-logs\") pod \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.175372 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da0eaf22-41f0-4b2f-b93e-36715d9e8499-logs" (OuterVolumeSpecName: "logs") pod "da0eaf22-41f0-4b2f-b93e-36715d9e8499" (UID: "da0eaf22-41f0-4b2f-b93e-36715d9e8499"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.176219 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-combined-ca-bundle\") pod \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.176274 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-internal-tls-certs\") pod \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.176312 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjtwx\" (UniqueName: \"kubernetes.io/projected/da0eaf22-41f0-4b2f-b93e-36715d9e8499-kube-api-access-hjtwx\") pod \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.176346 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-public-tls-certs\") pod \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.176388 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-config-data\") pod \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\" (UID: \"da0eaf22-41f0-4b2f-b93e-36715d9e8499\") " Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.176646 4948 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/da0eaf22-41f0-4b2f-b93e-36715d9e8499-logs\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.210534 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da0eaf22-41f0-4b2f-b93e-36715d9e8499-kube-api-access-hjtwx" (OuterVolumeSpecName: "kube-api-access-hjtwx") pod "da0eaf22-41f0-4b2f-b93e-36715d9e8499" (UID: "da0eaf22-41f0-4b2f-b93e-36715d9e8499"). InnerVolumeSpecName "kube-api-access-hjtwx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.215940 4948 generic.go:334] "Generic (PLEG): container finished" podID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerID="c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1" exitCode=0 Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.216515 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-config-data" (OuterVolumeSpecName: "config-data") pod "da0eaf22-41f0-4b2f-b93e-36715d9e8499" (UID: "da0eaf22-41f0-4b2f-b93e-36715d9e8499"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.216605 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da0eaf22-41f0-4b2f-b93e-36715d9e8499","Type":"ContainerDied","Data":"c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1"} Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.216681 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"da0eaf22-41f0-4b2f-b93e-36715d9e8499","Type":"ContainerDied","Data":"65d064e4d0c8dfa1ffe68c516f261565718e50e0878e2acd6ef0ad7f9b6873c8"} Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.216780 4948 scope.go:117] "RemoveContainer" containerID="c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.217011 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.259865 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "da0eaf22-41f0-4b2f-b93e-36715d9e8499" (UID: "da0eaf22-41f0-4b2f-b93e-36715d9e8499"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.269908 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "da0eaf22-41f0-4b2f-b93e-36715d9e8499" (UID: "da0eaf22-41f0-4b2f-b93e-36715d9e8499"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.278026 4948 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.278066 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjtwx\" (UniqueName: \"kubernetes.io/projected/da0eaf22-41f0-4b2f-b93e-36715d9e8499-kube-api-access-hjtwx\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.278083 4948 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.278097 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.283813 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da0eaf22-41f0-4b2f-b93e-36715d9e8499" (UID: "da0eaf22-41f0-4b2f-b93e-36715d9e8499"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.380001 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da0eaf22-41f0-4b2f-b93e-36715d9e8499-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.396779 4948 scope.go:117] "RemoveContainer" containerID="df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.419134 4948 scope.go:117] "RemoveContainer" containerID="c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1" Jan 20 20:10:19 crc kubenswrapper[4948]: E0120 20:10:19.419959 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1\": container with ID starting with c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1 not found: ID does not exist" containerID="c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.419991 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1"} err="failed to get container status \"c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1\": rpc error: code = NotFound desc = could not find container \"c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1\": container with ID starting with c295e200a63c1b58ce1e54306cf4406f52b541e9063634581ecc84794761a5a1 not found: ID does not exist" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.420013 4948 scope.go:117] "RemoveContainer" containerID="df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea" Jan 20 20:10:19 crc kubenswrapper[4948]: E0120 20:10:19.420226 4948 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea\": container with ID starting with df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea not found: ID does not exist" containerID="df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.420248 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea"} err="failed to get container status \"df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea\": rpc error: code = NotFound desc = could not find container \"df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea\": container with ID starting with df5b9b3eb17c45ffd622d94564b990205ea1e122088d47c52fa8de1c01dbedea not found: ID does not exist" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.553077 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.561927 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.588212 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 20 20:10:19 crc kubenswrapper[4948]: E0120 20:10:19.588752 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-api" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.588776 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-api" Jan 20 20:10:19 crc kubenswrapper[4948]: E0120 20:10:19.588799 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-log" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.588808 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-log" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.589043 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-api" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.589078 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" containerName="nova-api-log" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.590265 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.597205 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.598069 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.598258 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.629932 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.690285 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.690425 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-logs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.690991 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-public-tls-certs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.691102 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.691159 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-config-data\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.691234 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t6cz\" (UniqueName: \"kubernetes.io/projected/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-kube-api-access-7t6cz\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.793048 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t6cz\" (UniqueName: \"kubernetes.io/projected/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-kube-api-access-7t6cz\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.793129 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-combined-ca-bundle\") 
pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.793232 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-logs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.793267 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-public-tls-certs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.793335 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.793372 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-config-data\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.794028 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-logs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.797559 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-public-tls-certs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.797836 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.798161 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-config-data\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.798522 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.831881 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t6cz\" (UniqueName: \"kubernetes.io/projected/0bef1366-a94a-4d51-a5b4-53fe9a86a4d9-kube-api-access-7t6cz\") pod \"nova-api-0\" (UID: \"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9\") " pod="openstack/nova-api-0" Jan 
20 20:10:19 crc kubenswrapper[4948]: I0120 20:10:19.917086 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 20 20:10:20 crc kubenswrapper[4948]: I0120 20:10:20.444912 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 20 20:10:20 crc kubenswrapper[4948]: I0120 20:10:20.582906 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da0eaf22-41f0-4b2f-b93e-36715d9e8499" path="/var/lib/kubelet/pods/da0eaf22-41f0-4b2f-b93e-36715d9e8499/volumes" Jan 20 20:10:21 crc kubenswrapper[4948]: I0120 20:10:21.239221 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9","Type":"ContainerStarted","Data":"ba5c4024749927bde6e5699e5aa22bcd14ba3539f9d41dcf5e317ef178df2e69"} Jan 20 20:10:21 crc kubenswrapper[4948]: I0120 20:10:21.239541 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9","Type":"ContainerStarted","Data":"4c34c1c5e404e40d81e8f5c73df01c48ab4f36adb9f63942acc7e737e6788be1"} Jan 20 20:10:21 crc kubenswrapper[4948]: I0120 20:10:21.239555 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0bef1366-a94a-4d51-a5b4-53fe9a86a4d9","Type":"ContainerStarted","Data":"c34cdadad0203c923e9390c25f7bc4aed59e5e5e71ab9730072d189dcfdeb986"} Jan 20 20:10:21 crc kubenswrapper[4948]: I0120 20:10:21.271393 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.27137145 podStartE2EDuration="2.27137145s" podCreationTimestamp="2026-01-20 20:10:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:10:21.267348607 +0000 UTC m=+1249.218073576" watchObservedRunningTime="2026-01-20 20:10:21.27137145 +0000 UTC m=+1249.222096419" Jan 20 20:10:21 crc kubenswrapper[4948]: I0120 20:10:21.663612 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 20 20:10:21 crc kubenswrapper[4948]: I0120 20:10:21.663967 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 20 20:10:21 crc kubenswrapper[4948]: I0120 20:10:21.875100 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 20 20:10:26 crc kubenswrapper[4948]: I0120 20:10:26.664047 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 20 20:10:26 crc kubenswrapper[4948]: I0120 20:10:26.664629 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 20 20:10:26 crc kubenswrapper[4948]: I0120 20:10:26.875576 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 20 20:10:26 crc kubenswrapper[4948]: I0120 20:10:26.904850 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 20 20:10:27 crc kubenswrapper[4948]: I0120 20:10:27.442191 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 20 20:10:27 crc kubenswrapper[4948]: I0120 20:10:27.680037 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="405260b6-bbf5-4d0b-8a81-686340252185" 
containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:10:27 crc kubenswrapper[4948]: I0120 20:10:27.681050 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="405260b6-bbf5-4d0b-8a81-686340252185" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:10:29 crc kubenswrapper[4948]: I0120 20:10:29.917454 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 20:10:29 crc kubenswrapper[4948]: I0120 20:10:29.918032 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 20 20:10:30 crc kubenswrapper[4948]: I0120 20:10:30.931173 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0bef1366-a94a-4d51-a5b4-53fe9a86a4d9" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:10:30 crc kubenswrapper[4948]: I0120 20:10:30.931161 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0bef1366-a94a-4d51-a5b4-53fe9a86a4d9" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 20 20:10:33 crc kubenswrapper[4948]: I0120 20:10:33.681820 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 20 20:10:36 crc kubenswrapper[4948]: I0120 20:10:36.674119 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 20 20:10:36 crc kubenswrapper[4948]: I0120 20:10:36.677277 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 20 20:10:36 crc kubenswrapper[4948]: I0120 20:10:36.682991 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 20 20:10:37 crc kubenswrapper[4948]: I0120 20:10:37.404794 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 20 20:10:39 crc kubenswrapper[4948]: I0120 20:10:39.926313 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 20 20:10:39 crc kubenswrapper[4948]: I0120 20:10:39.926914 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 20 20:10:39 crc kubenswrapper[4948]: I0120 20:10:39.927225 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 20 20:10:39 crc kubenswrapper[4948]: I0120 20:10:39.959022 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 20 20:10:40 crc kubenswrapper[4948]: I0120 20:10:40.424047 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 20 20:10:40 crc kubenswrapper[4948]: I0120 20:10:40.432863 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 20 20:10:48 crc kubenswrapper[4948]: I0120 20:10:48.761584 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/rabbitmq-server-0"] Jan 20 20:10:49 crc kubenswrapper[4948]: I0120 20:10:49.659929 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 20 20:10:50 crc kubenswrapper[4948]: I0120 20:10:50.250612 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:10:50 crc kubenswrapper[4948]: I0120 20:10:50.250674 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:10:53 crc kubenswrapper[4948]: I0120 20:10:53.311676 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" containerName="rabbitmq" containerID="cri-o://1d5035085a041f76275ed70c0ab7e14cebb8b68fc62dcc8a4d27ec6b7211db0d" gracePeriod=604796 Jan 20 20:10:54 crc kubenswrapper[4948]: I0120 20:10:54.461944 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" containerName="rabbitmq" containerID="cri-o://d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9" gracePeriod=604796 Jan 20 20:10:59 crc kubenswrapper[4948]: I0120 20:10:59.648335 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"98083b85-e2b1-48e2-82f9-c71019aa2475","Type":"ContainerDied","Data":"1d5035085a041f76275ed70c0ab7e14cebb8b68fc62dcc8a4d27ec6b7211db0d"} Jan 20 20:10:59 crc kubenswrapper[4948]: I0120 20:10:59.648253 4948 generic.go:334] "Generic (PLEG): container finished" podID="98083b85-e2b1-48e2-82f9-c71019aa2475" containerID="1d5035085a041f76275ed70c0ab7e14cebb8b68fc62dcc8a4d27ec6b7211db0d" exitCode=0 Jan 20 20:10:59 crc kubenswrapper[4948]: I0120 20:10:59.901492 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.096468 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.096534 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6jc8\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-kube-api-access-p6jc8\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.096579 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-confd\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.096632 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-erlang-cookie\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.096650 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-plugins-conf\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.096925 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98083b85-e2b1-48e2-82f9-c71019aa2475-pod-info\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.097008 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98083b85-e2b1-48e2-82f9-c71019aa2475-erlang-cookie-secret\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.097091 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-config-data\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.097127 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-tls\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.097173 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-server-conf\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: 
\"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.097209 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-plugins\") pod \"98083b85-e2b1-48e2-82f9-c71019aa2475\" (UID: \"98083b85-e2b1-48e2-82f9-c71019aa2475\") " Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.098247 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.098631 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.099035 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.103795 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/98083b85-e2b1-48e2-82f9-c71019aa2475-pod-info" (OuterVolumeSpecName: "pod-info") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.105674 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.107417 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-kube-api-access-p6jc8" (OuterVolumeSpecName: "kube-api-access-p6jc8") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "kube-api-access-p6jc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.109297 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.109847 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98083b85-e2b1-48e2-82f9-c71019aa2475-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.147097 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-config-data" (OuterVolumeSpecName: "config-data") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202194 4948 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/98083b85-e2b1-48e2-82f9-c71019aa2475-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202258 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202268 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202279 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202356 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" "
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202367 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6jc8\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-kube-api-access-p6jc8\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202401 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202416 4948 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.202427 4948 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/98083b85-e2b1-48e2-82f9-c71019aa2475-pod-info\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.246410 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.255697 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-server-conf" (OuterVolumeSpecName: "server-conf") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.274858 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "98083b85-e2b1-48e2-82f9-c71019aa2475" (UID: "98083b85-e2b1-48e2-82f9-c71019aa2475"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.304930 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.304967 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/98083b85-e2b1-48e2-82f9-c71019aa2475-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.304987 4948 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/98083b85-e2b1-48e2-82f9-c71019aa2475-server-conf\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.659644 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"98083b85-e2b1-48e2-82f9-c71019aa2475","Type":"ContainerDied","Data":"cd508d06f03199662e24df331e8edb08892a44ca23579abf655daae83300a630"}
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.659894 4948 scope.go:117] "RemoveContainer" containerID="1d5035085a041f76275ed70c0ab7e14cebb8b68fc62dcc8a4d27ec6b7211db0d"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.659819 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.730905 4948 scope.go:117] "RemoveContainer" containerID="88ea89f84b7617f501ddbb4b9afb6561e4fd047f7d7e5577d0b84b4bdbfe0e71"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.741788 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.751282 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.777036 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 20:11:00 crc kubenswrapper[4948]: E0120 20:11:00.777448 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" containerName="rabbitmq"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.777471 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" containerName="rabbitmq"
Jan 20 20:11:00 crc kubenswrapper[4948]: E0120 20:11:00.777498 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" containerName="setup-container"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.777504 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" containerName="setup-container"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.777677 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" containerName="rabbitmq"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.779077 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.794081 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.794132 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.794329 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.794560 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.794641 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.794758 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.794833 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-2f6qg"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.825562 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.919789 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.919837 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.919861 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.919882 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.919909 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.920119 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8c30b121-20f6-47ad-89e0-ce511df4efb7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.920206 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.920268 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-config-data\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.920363 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjt6z\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-kube-api-access-wjt6z\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.920494 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8c30b121-20f6-47ad-89e0-ce511df4efb7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:00 crc kubenswrapper[4948]: I0120 20:11:00.920553 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.021780 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8c30b121-20f6-47ad-89e0-ce511df4efb7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.021835 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.021868 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-config-data\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.021906 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjt6z\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-kube-api-access-wjt6z\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.021958 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8c30b121-20f6-47ad-89e0-ce511df4efb7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.021988 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.022023 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.022043 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.022069 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.022089 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.022112 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.022499 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.023518 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.025110 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.025288 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.025902 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.043229 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8c30b121-20f6-47ad-89e0-ce511df4efb7-config-data\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.046367 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.059177 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/8c30b121-20f6-47ad-89e0-ce511df4efb7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.059601 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/8c30b121-20f6-47ad-89e0-ce511df4efb7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.188312 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjt6z\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-kube-api-access-wjt6z\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.262176 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/8c30b121-20f6-47ad-89e0-ce511df4efb7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.313479 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"8c30b121-20f6-47ad-89e0-ce511df4efb7\") " pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.401426 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.456462 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.513341 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-plugins\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.513787 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e243433b-5932-4d3d-a280-b7999d49e1ec-erlang-cookie-secret\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.513962 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-erlang-cookie\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514386 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514400 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-config-data\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514470 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8xlj\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-kube-api-access-d8xlj\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514596 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-tls\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514692 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-server-conf\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514797 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514844 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-plugins-conf\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514879 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e243433b-5932-4d3d-a280-b7999d49e1ec-pod-info\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.514920 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-confd\") pod \"e243433b-5932-4d3d-a280-b7999d49e1ec\" (UID: \"e243433b-5932-4d3d-a280-b7999d49e1ec\") "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.515598 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.519113 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.524952 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.525111 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.532858 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e243433b-5932-4d3d-a280-b7999d49e1ec-pod-info" (OuterVolumeSpecName: "pod-info") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.536116 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-kube-api-access-d8xlj" (OuterVolumeSpecName: "kube-api-access-d8xlj") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "kube-api-access-d8xlj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.541458 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e243433b-5932-4d3d-a280-b7999d49e1ec-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.541625 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.621735 4948 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e243433b-5932-4d3d-a280-b7999d49e1ec-pod-info\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.621776 4948 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e243433b-5932-4d3d-a280-b7999d49e1ec-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.621788 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.621798 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8xlj\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-kube-api-access-d8xlj\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.621807 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.621837 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.621857 4948 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-plugins-conf\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.642336 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-config-data" (OuterVolumeSpecName: "config-data") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.692508 4948 generic.go:334] "Generic (PLEG): container finished" podID="e243433b-5932-4d3d-a280-b7999d49e1ec" containerID="d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9" exitCode=0
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.692546 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e243433b-5932-4d3d-a280-b7999d49e1ec","Type":"ContainerDied","Data":"d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9"}
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.692567 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e243433b-5932-4d3d-a280-b7999d49e1ec","Type":"ContainerDied","Data":"ff8946b701b6fa3b50707f6d57b561ed1d7b90562fae8aa23dbf396ecae63556"}
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.692584 4948 scope.go:117] "RemoveContainer" containerID="d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.692687 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.723205 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.726325 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.750303 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-server-conf" (OuterVolumeSpecName: "server-conf") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.798237 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e243433b-5932-4d3d-a280-b7999d49e1ec" (UID: "e243433b-5932-4d3d-a280-b7999d49e1ec"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.826113 4948 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e243433b-5932-4d3d-a280-b7999d49e1ec-server-conf\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.826150 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.826161 4948 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e243433b-5932-4d3d-a280-b7999d49e1ec-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.889791 4948 scope.go:117] "RemoveContainer" containerID="eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.921642 4948 scope.go:117] "RemoveContainer" containerID="d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9"
Jan 20 20:11:01 crc kubenswrapper[4948]: E0120 20:11:01.922071 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9\": container with ID starting with d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9 not found: ID does not exist" containerID="d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.922096 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9"} err="failed to get container status \"d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9\": rpc error: code = NotFound desc = could not find container \"d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9\": container with ID starting with d13b055e9b3b3b633f0d2262529bbb552d97e9c2480e397e731e702de63dc7b9 not found: ID does not exist"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.922116 4948 scope.go:117] "RemoveContainer" containerID="eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce"
Jan 20 20:11:01 crc kubenswrapper[4948]: E0120 20:11:01.922571 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce\": container with ID starting with eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce not found: ID does not exist" containerID="eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce"
Jan 20 20:11:01 crc kubenswrapper[4948]: I0120 20:11:01.922594 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce"} err="failed to get container status \"eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce\": rpc error: code = NotFound desc = could not find container \"eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce\": container with ID starting with eeb52ae00faae534951293dcffb752fed3331ae3eb5a120abdcf16f22e3a21ce not found: ID does not exist"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.084107 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.105597 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.139051 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 20 20:11:02 crc kubenswrapper[4948]: E0120 20:11:02.159850 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" containerName="rabbitmq"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.160104 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" containerName="rabbitmq"
Jan 20 20:11:02 crc kubenswrapper[4948]: E0120 20:11:02.160221 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" containerName="setup-container"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.160319 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" containerName="setup-container"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.160972 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" containerName="rabbitmq"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.167622 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.167966 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.171390 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.171847 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.171964 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.172863 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-bjbgp"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.174023 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.174266 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.174324 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.292487 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.337814 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ml94z\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-kube-api-access-ml94z\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.337898 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.337940 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.337978 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.338012 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/899d2813-4685-40b7-ba95-60d3126802a2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.338055 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.338083 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.338126 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/899d2813-4685-40b7-ba95-60d3126802a2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.338156 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.338202 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.338221 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439414 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ml94z\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-kube-api-access-ml94z\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439474 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439513 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439534 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439560 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/899d2813-4685-40b7-ba95-60d3126802a2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439590 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439610 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439644 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/899d2813-4685-40b7-ba95-60d3126802a2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439665 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439720 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.439747 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.440675 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.441079 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.441227 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.441905 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.442048 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.442961 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/899d2813-4685-40b7-ba95-60d3126802a2-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.451430 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.451463 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.451546 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/899d2813-4685-40b7-ba95-60d3126802a2-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.451563 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/899d2813-4685-40b7-ba95-60d3126802a2-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.462465 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ml94z\" (UniqueName: \"kubernetes.io/projected/899d2813-4685-40b7-ba95-60d3126802a2-kube-api-access-ml94z\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.482446 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"899d2813-4685-40b7-ba95-60d3126802a2\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.485586 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"]
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.487152 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.489194 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.500456 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.508001 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"]
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.582239 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98083b85-e2b1-48e2-82f9-c71019aa2475" path="/var/lib/kubelet/pods/98083b85-e2b1-48e2-82f9-c71019aa2475/volumes"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.583565 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e243433b-5932-4d3d-a280-b7999d49e1ec" path="/var/lib/kubelet/pods/e243433b-5932-4d3d-a280-b7999d49e1ec/volumes"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.644863 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgsnx\" (UniqueName: \"kubernetes.io/projected/f09e49c0-dab2-42af-bba9-2def7afc1087-kube-api-access-kgsnx\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.645198 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.645272 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.645370 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-config\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.645445 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.645486 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.645518 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.704517 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8c30b121-20f6-47ad-89e0-ce511df4efb7","Type":"ContainerStarted","Data":"5c56a2cf4c7bda5d64fddd3aafc4d80de72d6188323f856deca7a44f8f7cf423"}
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.747622 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.747681 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.747740 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.747819 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgsnx\" (UniqueName: \"kubernetes.io/projected/f09e49c0-dab2-42af-bba9-2def7afc1087-kube-api-access-kgsnx\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.747838 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.747907 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.748015 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-config\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.749796 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.749977 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.750041 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.750229 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.750673 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-config\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.750804 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.768671 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgsnx\" (UniqueName: \"kubernetes.io/projected/f09e49c0-dab2-42af-bba9-2def7afc1087-kube-api-access-kgsnx\") pod \"dnsmasq-dns-79bd4cc8c9-wrtnd\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:02 crc kubenswrapper[4948]: I0120 20:11:02.805130 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:03 crc kubenswrapper[4948]: I0120 20:11:03.020337 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 20 20:11:03 crc kubenswrapper[4948]: W0120 20:11:03.340439 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf09e49c0_dab2_42af_bba9_2def7afc1087.slice/crio-09e092956b40d3ea9cc21fc30d6a249f43a67c11ac74a0d4bcc3a50181fdef59 WatchSource:0}: Error finding container 09e092956b40d3ea9cc21fc30d6a249f43a67c11ac74a0d4bcc3a50181fdef59: Status 404 returned error can't find the container with id 09e092956b40d3ea9cc21fc30d6a249f43a67c11ac74a0d4bcc3a50181fdef59
Jan 20 20:11:03 crc kubenswrapper[4948]: I0120 20:11:03.346015 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"]
Jan 20 20:11:03 crc kubenswrapper[4948]: I0120 20:11:03.720825 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"899d2813-4685-40b7-ba95-60d3126802a2","Type":"ContainerStarted","Data":"a6b59da9f93cf89aa999ce6dc74c7acfe7345f31038786d756782e9f016c7aa1"}
Jan 20 20:11:03 crc kubenswrapper[4948]: I0120 20:11:03.722728 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" event={"ID":"f09e49c0-dab2-42af-bba9-2def7afc1087","Type":"ContainerStarted","Data":"09e092956b40d3ea9cc21fc30d6a249f43a67c11ac74a0d4bcc3a50181fdef59"}
Jan 20 20:11:04 crc kubenswrapper[4948]: I0120 20:11:04.735282 4948 generic.go:334] "Generic (PLEG): container finished" podID="f09e49c0-dab2-42af-bba9-2def7afc1087" containerID="c447a54d34e0accec44b65840a52d63790ae92c7ec7ece51fd003612cb803c30" exitCode=0
Jan 20 20:11:04 crc kubenswrapper[4948]: I0120 20:11:04.735347 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" event={"ID":"f09e49c0-dab2-42af-bba9-2def7afc1087","Type":"ContainerDied","Data":"c447a54d34e0accec44b65840a52d63790ae92c7ec7ece51fd003612cb803c30"}
Jan 20 20:11:04 crc kubenswrapper[4948]: I0120 20:11:04.740537 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8c30b121-20f6-47ad-89e0-ce511df4efb7","Type":"ContainerStarted","Data":"2ee95c9f63e0544d9ad20d69379c058fa6c4101144e7499403689a88fcee28ea"}
Jan 20 20:11:04 crc kubenswrapper[4948]: I0120 20:11:04.742431 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"899d2813-4685-40b7-ba95-60d3126802a2","Type":"ContainerStarted","Data":"1514c8ffec260e64b2b179100c93e27d397697bd498922b808cd03d459a51d08"}
Jan 20 20:11:05 crc kubenswrapper[4948]: I0120 20:11:05.758105 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" event={"ID":"f09e49c0-dab2-42af-bba9-2def7afc1087","Type":"ContainerStarted","Data":"9da5f582ccbf1abe2840c3aac691c11c23825a932ae0b705d55126f794f7cca8"}
Jan 20 20:11:05 crc kubenswrapper[4948]: I0120 20:11:05.758630 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:05 crc kubenswrapper[4948]: I0120 20:11:05.800524 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" podStartSLOduration=3.800500843 podStartE2EDuration="3.800500843s" podCreationTimestamp="2026-01-20 20:11:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:11:05.78732412 +0000 UTC m=+1293.738049109" watchObservedRunningTime="2026-01-20 20:11:05.800500843 +0000 UTC m=+1293.751225812"
Jan 20 20:11:12 crc kubenswrapper[4948]: I0120 20:11:12.808293 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"
Jan 20 20:11:12 crc kubenswrapper[4948]: I0120 20:11:12.904076 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-zk22b"]
Jan 20 20:11:12 crc kubenswrapper[4948]: I0120 20:11:12.904313 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" podUID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerName="dnsmasq-dns" containerID="cri-o://829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85" gracePeriod=10
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.093899 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f4d4c4b7-5pcpw"]
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.095663 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw"
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.127389 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f4d4c4b7-5pcpw"]
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.217501 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-ovsdbserver-nb\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw"
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.217599 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6m45\" (UniqueName: \"kubernetes.io/projected/fb7020ef-1f09-4241-9001-eb628c16fd07-kube-api-access-d6m45\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw"
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.217643 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-dns-svc\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw"
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.218431 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-ovsdbserver-sb\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw"
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.218502 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-config\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw"
Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.218569 4948
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-dns-swift-storage-0\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.218691 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-openstack-edpm-ipam\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.321072 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-openstack-edpm-ipam\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.321215 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-ovsdbserver-nb\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.321276 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6m45\" (UniqueName: \"kubernetes.io/projected/fb7020ef-1f09-4241-9001-eb628c16fd07-kube-api-access-d6m45\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.321314 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-dns-svc\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.321352 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-ovsdbserver-sb\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.321395 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-config\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.321463 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-dns-swift-storage-0\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.322336 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-openstack-edpm-ipam\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.322464 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-ovsdbserver-sb\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.323060 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-ovsdbserver-nb\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.323238 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-dns-svc\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.327837 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-config\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.336530 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fb7020ef-1f09-4241-9001-eb628c16fd07-dns-swift-storage-0\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.368324 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6m45\" (UniqueName: \"kubernetes.io/projected/fb7020ef-1f09-4241-9001-eb628c16fd07-kube-api-access-d6m45\") pod \"dnsmasq-dns-f4d4c4b7-5pcpw\" (UID: \"fb7020ef-1f09-4241-9001-eb628c16fd07\") " pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.426323 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.569573 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.729239 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmngg\" (UniqueName: \"kubernetes.io/projected/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-kube-api-access-rmngg\") pod \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.729339 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-swift-storage-0\") pod \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.729454 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-config\") pod \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.729545 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-svc\") pod \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.729580 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-nb\") pod \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.729641 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-sb\") pod \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\" (UID: \"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3\") " Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.739281 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-kube-api-access-rmngg" (OuterVolumeSpecName: "kube-api-access-rmngg") pod "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" (UID: "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3"). InnerVolumeSpecName "kube-api-access-rmngg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.782345 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" (UID: "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.787068 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" (UID: "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.803559 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-config" (OuterVolumeSpecName: "config") pod "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" (UID: "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.806738 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" (UID: "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.817599 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" (UID: "5219f6f2-82bd-4f53-8f8c-be82ae5acbc3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.840452 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.840503 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.840515 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.840526 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.840536 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmngg\" (UniqueName: \"kubernetes.io/projected/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-kube-api-access-rmngg\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.840545 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.844824 4948 generic.go:334] "Generic (PLEG): container finished" podID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerID="829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85" exitCode=0 Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.845125 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.845130 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" event={"ID":"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3","Type":"ContainerDied","Data":"829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85"} Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.845162 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" event={"ID":"5219f6f2-82bd-4f53-8f8c-be82ae5acbc3","Type":"ContainerDied","Data":"ba182ea099880231c785fee90ea789b34d6c3a16d26ae029f1b91f111582ab53"} Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.845179 4948 scope.go:117] "RemoveContainer" containerID="829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.870534 4948 scope.go:117] "RemoveContainer" containerID="75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.888633 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-zk22b"] Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.898760 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-zk22b"] Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.901999 4948 scope.go:117] "RemoveContainer" containerID="829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85" Jan 20 20:11:13 crc kubenswrapper[4948]: E0120 20:11:13.902631 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85\": container with ID starting with 829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85 not found: ID does not exist" containerID="829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.902676 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85"} err="failed to get container status \"829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85\": rpc error: code = NotFound desc = could not find container \"829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85\": container with ID starting with 829fac0441734060fcca2ca7ca2f5627533a4988a8a28c98cec763ef986bef85 not found: ID does not exist" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.902719 4948 scope.go:117] "RemoveContainer" containerID="75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447" Jan 20 20:11:13 crc kubenswrapper[4948]: E0120 20:11:13.903073 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447\": container with ID starting with 75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447 not found: ID does not exist" containerID="75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.903092 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447"} err="failed to get container status 
\"75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447\": rpc error: code = NotFound desc = could not find container \"75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447\": container with ID starting with 75385b904bc1a7311075cad4e9347dab4527241e5dbd54a63a6a7f768f732447 not found: ID does not exist" Jan 20 20:11:13 crc kubenswrapper[4948]: I0120 20:11:13.938265 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f4d4c4b7-5pcpw"] Jan 20 20:11:14 crc kubenswrapper[4948]: I0120 20:11:14.580481 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" path="/var/lib/kubelet/pods/5219f6f2-82bd-4f53-8f8c-be82ae5acbc3/volumes" Jan 20 20:11:14 crc kubenswrapper[4948]: I0120 20:11:14.857057 4948 generic.go:334] "Generic (PLEG): container finished" podID="fb7020ef-1f09-4241-9001-eb628c16fd07" containerID="4c5f422100d046ff1aa8d04eaad7cd9ab02cd4753194fce942e93cd4000414a6" exitCode=0 Jan 20 20:11:14 crc kubenswrapper[4948]: I0120 20:11:14.857114 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" event={"ID":"fb7020ef-1f09-4241-9001-eb628c16fd07","Type":"ContainerDied","Data":"4c5f422100d046ff1aa8d04eaad7cd9ab02cd4753194fce942e93cd4000414a6"} Jan 20 20:11:14 crc kubenswrapper[4948]: I0120 20:11:14.858079 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" event={"ID":"fb7020ef-1f09-4241-9001-eb628c16fd07","Type":"ContainerStarted","Data":"2f746f035781404b0fc331794baeb14b53bd005fa416669766e058bf456b0f4e"} Jan 20 20:11:15 crc kubenswrapper[4948]: I0120 20:11:15.882055 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" event={"ID":"fb7020ef-1f09-4241-9001-eb628c16fd07","Type":"ContainerStarted","Data":"d0ae26b30ca9330eececae85596abef356c94333d01eeaeb9c1868c351f4363b"} Jan 20 20:11:15 crc kubenswrapper[4948]: I0120 20:11:15.882495 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:15 crc kubenswrapper[4948]: I0120 20:11:15.901064 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" podStartSLOduration=2.901046479 podStartE2EDuration="2.901046479s" podCreationTimestamp="2026-01-20 20:11:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:11:15.900981427 +0000 UTC m=+1303.851706396" watchObservedRunningTime="2026-01-20 20:11:15.901046479 +0000 UTC m=+1303.851771448" Jan 20 20:11:18 crc kubenswrapper[4948]: I0120 20:11:18.390516 4948 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-89c5cd4d5-zk22b" podUID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.200:5353: i/o timeout" Jan 20 20:11:20 crc kubenswrapper[4948]: I0120 20:11:20.249668 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:11:20 crc kubenswrapper[4948]: I0120 20:11:20.250033 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" 
podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:11:23 crc kubenswrapper[4948]: I0120 20:11:23.428036 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f4d4c4b7-5pcpw" Jan 20 20:11:23 crc kubenswrapper[4948]: I0120 20:11:23.511338 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"] Jan 20 20:11:23 crc kubenswrapper[4948]: I0120 20:11:23.511594 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" podUID="f09e49c0-dab2-42af-bba9-2def7afc1087" containerName="dnsmasq-dns" containerID="cri-o://9da5f582ccbf1abe2840c3aac691c11c23825a932ae0b705d55126f794f7cca8" gracePeriod=10 Jan 20 20:11:23 crc kubenswrapper[4948]: I0120 20:11:23.974662 4948 generic.go:334] "Generic (PLEG): container finished" podID="f09e49c0-dab2-42af-bba9-2def7afc1087" containerID="9da5f582ccbf1abe2840c3aac691c11c23825a932ae0b705d55126f794f7cca8" exitCode=0 Jan 20 20:11:23 crc kubenswrapper[4948]: I0120 20:11:23.974753 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" event={"ID":"f09e49c0-dab2-42af-bba9-2def7afc1087","Type":"ContainerDied","Data":"9da5f582ccbf1abe2840c3aac691c11c23825a932ae0b705d55126f794f7cca8"} Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.085865 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.173775 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-nb\") pod \"f09e49c0-dab2-42af-bba9-2def7afc1087\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.173862 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-config\") pod \"f09e49c0-dab2-42af-bba9-2def7afc1087\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.173888 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-openstack-edpm-ipam\") pod \"f09e49c0-dab2-42af-bba9-2def7afc1087\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.173960 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-swift-storage-0\") pod \"f09e49c0-dab2-42af-bba9-2def7afc1087\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.173987 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgsnx\" (UniqueName: \"kubernetes.io/projected/f09e49c0-dab2-42af-bba9-2def7afc1087-kube-api-access-kgsnx\") pod \"f09e49c0-dab2-42af-bba9-2def7afc1087\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.174025 4948 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-sb\") pod \"f09e49c0-dab2-42af-bba9-2def7afc1087\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.174048 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-svc\") pod \"f09e49c0-dab2-42af-bba9-2def7afc1087\" (UID: \"f09e49c0-dab2-42af-bba9-2def7afc1087\") " Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.191139 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f09e49c0-dab2-42af-bba9-2def7afc1087-kube-api-access-kgsnx" (OuterVolumeSpecName: "kube-api-access-kgsnx") pod "f09e49c0-dab2-42af-bba9-2def7afc1087" (UID: "f09e49c0-dab2-42af-bba9-2def7afc1087"). InnerVolumeSpecName "kube-api-access-kgsnx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.234224 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f09e49c0-dab2-42af-bba9-2def7afc1087" (UID: "f09e49c0-dab2-42af-bba9-2def7afc1087"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.239366 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f09e49c0-dab2-42af-bba9-2def7afc1087" (UID: "f09e49c0-dab2-42af-bba9-2def7afc1087"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.242467 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "f09e49c0-dab2-42af-bba9-2def7afc1087" (UID: "f09e49c0-dab2-42af-bba9-2def7afc1087"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.250432 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f09e49c0-dab2-42af-bba9-2def7afc1087" (UID: "f09e49c0-dab2-42af-bba9-2def7afc1087"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.253722 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-config" (OuterVolumeSpecName: "config") pod "f09e49c0-dab2-42af-bba9-2def7afc1087" (UID: "f09e49c0-dab2-42af-bba9-2def7afc1087"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.262033 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f09e49c0-dab2-42af-bba9-2def7afc1087" (UID: "f09e49c0-dab2-42af-bba9-2def7afc1087"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.276863 4948 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-config\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.276896 4948 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.276908 4948 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.276917 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgsnx\" (UniqueName: \"kubernetes.io/projected/f09e49c0-dab2-42af-bba9-2def7afc1087-kube-api-access-kgsnx\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.276925 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.276934 4948 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.276941 4948 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f09e49c0-dab2-42af-bba9-2def7afc1087-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.984065 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" event={"ID":"f09e49c0-dab2-42af-bba9-2def7afc1087","Type":"ContainerDied","Data":"09e092956b40d3ea9cc21fc30d6a249f43a67c11ac74a0d4bcc3a50181fdef59"} Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.984127 4948 scope.go:117] "RemoveContainer" containerID="9da5f582ccbf1abe2840c3aac691c11c23825a932ae0b705d55126f794f7cca8" Jan 20 20:11:24 crc kubenswrapper[4948]: I0120 20:11:24.984136 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-wrtnd" Jan 20 20:11:25 crc kubenswrapper[4948]: I0120 20:11:25.004570 4948 scope.go:117] "RemoveContainer" containerID="c447a54d34e0accec44b65840a52d63790ae92c7ec7ece51fd003612cb803c30" Jan 20 20:11:25 crc kubenswrapper[4948]: I0120 20:11:25.042494 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"] Jan 20 20:11:25 crc kubenswrapper[4948]: I0120 20:11:25.065168 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-wrtnd"] Jan 20 20:11:26 crc kubenswrapper[4948]: I0120 20:11:26.584512 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f09e49c0-dab2-42af-bba9-2def7afc1087" path="/var/lib/kubelet/pods/f09e49c0-dab2-42af-bba9-2def7afc1087/volumes" Jan 20 20:11:36 crc kubenswrapper[4948]: I0120 20:11:36.103307 4948 generic.go:334] "Generic (PLEG): container finished" podID="8c30b121-20f6-47ad-89e0-ce511df4efb7" containerID="2ee95c9f63e0544d9ad20d69379c058fa6c4101144e7499403689a88fcee28ea" exitCode=0 Jan 20 20:11:36 crc kubenswrapper[4948]: I0120 20:11:36.103388 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8c30b121-20f6-47ad-89e0-ce511df4efb7","Type":"ContainerDied","Data":"2ee95c9f63e0544d9ad20d69379c058fa6c4101144e7499403689a88fcee28ea"} Jan 20 20:11:37 crc kubenswrapper[4948]: I0120 20:11:37.115385 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"8c30b121-20f6-47ad-89e0-ce511df4efb7","Type":"ContainerStarted","Data":"44fa11c706c0a4e9e93b02813de4d4117c712bd9ebffdfdecfb8bd6c3fcebc8e"} Jan 20 20:11:37 crc kubenswrapper[4948]: I0120 20:11:37.118424 4948 generic.go:334] "Generic (PLEG): container finished" podID="899d2813-4685-40b7-ba95-60d3126802a2" containerID="1514c8ffec260e64b2b179100c93e27d397697bd498922b808cd03d459a51d08" exitCode=0 Jan 20 20:11:37 crc kubenswrapper[4948]: I0120 20:11:37.118575 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"899d2813-4685-40b7-ba95-60d3126802a2","Type":"ContainerDied","Data":"1514c8ffec260e64b2b179100c93e27d397697bd498922b808cd03d459a51d08"} Jan 20 20:11:37 crc kubenswrapper[4948]: I0120 20:11:37.153362 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.153335402 podStartE2EDuration="37.153335402s" podCreationTimestamp="2026-01-20 20:11:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:11:37.14831976 +0000 UTC m=+1325.099044749" watchObservedRunningTime="2026-01-20 20:11:37.153335402 +0000 UTC m=+1325.104060371" Jan 20 20:11:38 crc kubenswrapper[4948]: I0120 20:11:38.130326 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"899d2813-4685-40b7-ba95-60d3126802a2","Type":"ContainerStarted","Data":"d7907f5756d7b3ade99455f01334d93832f33f8ff4378e1ea7c0df5e6fbca1a1"} Jan 20 20:11:38 crc kubenswrapper[4948]: I0120 20:11:38.130888 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:11:38 crc kubenswrapper[4948]: I0120 20:11:38.160975 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.160946363 podStartE2EDuration="36.160946363s" 
podCreationTimestamp="2026-01-20 20:11:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:11:38.155906201 +0000 UTC m=+1326.106631190" watchObservedRunningTime="2026-01-20 20:11:38.160946363 +0000 UTC m=+1326.111671352" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.457049 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.528474 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl"] Jan 20 20:11:41 crc kubenswrapper[4948]: E0120 20:11:41.535469 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerName="dnsmasq-dns" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.535722 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerName="dnsmasq-dns" Jan 20 20:11:41 crc kubenswrapper[4948]: E0120 20:11:41.535833 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f09e49c0-dab2-42af-bba9-2def7afc1087" containerName="dnsmasq-dns" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.535911 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f09e49c0-dab2-42af-bba9-2def7afc1087" containerName="dnsmasq-dns" Jan 20 20:11:41 crc kubenswrapper[4948]: E0120 20:11:41.536019 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerName="init" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.536099 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerName="init" Jan 20 20:11:41 crc kubenswrapper[4948]: E0120 20:11:41.536195 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f09e49c0-dab2-42af-bba9-2def7afc1087" containerName="init" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.536267 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="f09e49c0-dab2-42af-bba9-2def7afc1087" containerName="init" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.536590 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="f09e49c0-dab2-42af-bba9-2def7afc1087" containerName="dnsmasq-dns" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.536792 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5219f6f2-82bd-4f53-8f8c-be82ae5acbc3" containerName="dnsmasq-dns" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.537858 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.541417 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.542210 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.542235 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.543966 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.565069 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl"] Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.650825 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.651146 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rks9x\" (UniqueName: \"kubernetes.io/projected/5a4fea5f-1b46-482d-a956-9307be45284c-kube-api-access-rks9x\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.651183 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.651205 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.753182 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rks9x\" (UniqueName: \"kubernetes.io/projected/5a4fea5f-1b46-482d-a956-9307be45284c-kube-api-access-rks9x\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.753244 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.753285 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.753331 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.759462 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.760485 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.772335 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.775070 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rks9x\" (UniqueName: \"kubernetes.io/projected/5a4fea5f-1b46-482d-a956-9307be45284c-kube-api-access-rks9x\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-482zl\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:41 crc kubenswrapper[4948]: I0120 20:11:41.868555 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:11:42 crc kubenswrapper[4948]: I0120 20:11:42.641333 4948 scope.go:117] "RemoveContainer" containerID="e212820504850ebcb9992e631d79fba8a0d64cf4d4a9aa6a634242539f0da7c9" Jan 20 20:11:42 crc kubenswrapper[4948]: I0120 20:11:42.648574 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl"] Jan 20 20:11:42 crc kubenswrapper[4948]: I0120 20:11:42.718067 4948 scope.go:117] "RemoveContainer" containerID="f487e4e91ecaa0711310c8e0b7acc4cff2d35e96dd3ae6fa1f545418d6f523a9" Jan 20 20:11:42 crc kubenswrapper[4948]: I0120 20:11:42.743012 4948 scope.go:117] "RemoveContainer" containerID="fe77cc93577f6f2e5cf5e29437b5b5d2a9d3b82677502716ff829fd93a0bf771" Jan 20 20:11:43 crc kubenswrapper[4948]: I0120 20:11:43.192242 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" event={"ID":"5a4fea5f-1b46-482d-a956-9307be45284c","Type":"ContainerStarted","Data":"8621d7afcc4cbc8292858266e8347e0169760f454c149c13ae640e12a253f69d"} Jan 20 20:11:50 crc kubenswrapper[4948]: I0120 20:11:50.250011 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:11:50 crc kubenswrapper[4948]: I0120 20:11:50.250833 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:11:50 crc kubenswrapper[4948]: I0120 20:11:50.250888 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:11:50 crc kubenswrapper[4948]: I0120 20:11:50.251680 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f6e2109b164e1a5b2cd57afe834ac3fbe85f27835236a7bebdf71bc6a9761ad"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:11:50 crc kubenswrapper[4948]: I0120 20:11:50.251760 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://7f6e2109b164e1a5b2cd57afe834ac3fbe85f27835236a7bebdf71bc6a9761ad" gracePeriod=600 Jan 20 20:11:51 crc kubenswrapper[4948]: I0120 20:11:51.461894 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 20 20:11:51 crc kubenswrapper[4948]: I0120 20:11:51.526228 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="7f6e2109b164e1a5b2cd57afe834ac3fbe85f27835236a7bebdf71bc6a9761ad" exitCode=0 Jan 20 20:11:51 crc kubenswrapper[4948]: I0120 20:11:51.526271 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" 
event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"7f6e2109b164e1a5b2cd57afe834ac3fbe85f27835236a7bebdf71bc6a9761ad"} Jan 20 20:11:51 crc kubenswrapper[4948]: I0120 20:11:51.526316 4948 scope.go:117] "RemoveContainer" containerID="a26c04565cc618f3f275d4a90dd01432ac1f9fe490efd0919ef900cbd2cc4e1c" Jan 20 20:11:52 crc kubenswrapper[4948]: I0120 20:11:52.506352 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 20 20:11:54 crc kubenswrapper[4948]: I0120 20:11:54.585220 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"} Jan 20 20:11:54 crc kubenswrapper[4948]: I0120 20:11:54.590080 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" event={"ID":"5a4fea5f-1b46-482d-a956-9307be45284c","Type":"ContainerStarted","Data":"c37dd6c2b322443a2de19098dcb1c9d43fe1c1221e36a951c4f4252ed54dfbc0"} Jan 20 20:11:54 crc kubenswrapper[4948]: I0120 20:11:54.637616 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" podStartSLOduration=2.278027725 podStartE2EDuration="13.637577728s" podCreationTimestamp="2026-01-20 20:11:41 +0000 UTC" firstStartedPulling="2026-01-20 20:11:42.671957827 +0000 UTC m=+1330.622682796" lastFinishedPulling="2026-01-20 20:11:54.03150783 +0000 UTC m=+1341.982232799" observedRunningTime="2026-01-20 20:11:54.627188854 +0000 UTC m=+1342.577913823" watchObservedRunningTime="2026-01-20 20:11:54.637577728 +0000 UTC m=+1342.588302697" Jan 20 20:12:06 crc kubenswrapper[4948]: I0120 20:12:06.706626 4948 generic.go:334] "Generic (PLEG): container finished" podID="5a4fea5f-1b46-482d-a956-9307be45284c" containerID="c37dd6c2b322443a2de19098dcb1c9d43fe1c1221e36a951c4f4252ed54dfbc0" exitCode=0 Jan 20 20:12:06 crc kubenswrapper[4948]: I0120 20:12:06.706719 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" event={"ID":"5a4fea5f-1b46-482d-a956-9307be45284c","Type":"ContainerDied","Data":"c37dd6c2b322443a2de19098dcb1c9d43fe1c1221e36a951c4f4252ed54dfbc0"} Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.192437 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.322653 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rks9x\" (UniqueName: \"kubernetes.io/projected/5a4fea5f-1b46-482d-a956-9307be45284c-kube-api-access-rks9x\") pod \"5a4fea5f-1b46-482d-a956-9307be45284c\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.323260 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-ssh-key-openstack-edpm-ipam\") pod \"5a4fea5f-1b46-482d-a956-9307be45284c\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.323742 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-repo-setup-combined-ca-bundle\") pod \"5a4fea5f-1b46-482d-a956-9307be45284c\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.324132 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-inventory\") pod \"5a4fea5f-1b46-482d-a956-9307be45284c\" (UID: \"5a4fea5f-1b46-482d-a956-9307be45284c\") " Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.328818 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "5a4fea5f-1b46-482d-a956-9307be45284c" (UID: "5a4fea5f-1b46-482d-a956-9307be45284c"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.335982 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a4fea5f-1b46-482d-a956-9307be45284c-kube-api-access-rks9x" (OuterVolumeSpecName: "kube-api-access-rks9x") pod "5a4fea5f-1b46-482d-a956-9307be45284c" (UID: "5a4fea5f-1b46-482d-a956-9307be45284c"). InnerVolumeSpecName "kube-api-access-rks9x". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.352552 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-inventory" (OuterVolumeSpecName: "inventory") pod "5a4fea5f-1b46-482d-a956-9307be45284c" (UID: "5a4fea5f-1b46-482d-a956-9307be45284c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.360000 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "5a4fea5f-1b46-482d-a956-9307be45284c" (UID: "5a4fea5f-1b46-482d-a956-9307be45284c"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.427462 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.427491 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rks9x\" (UniqueName: \"kubernetes.io/projected/5a4fea5f-1b46-482d-a956-9307be45284c-kube-api-access-rks9x\") on node \"crc\" DevicePath \"\"" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.427503 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.427513 4948 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5a4fea5f-1b46-482d-a956-9307be45284c-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.724755 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" event={"ID":"5a4fea5f-1b46-482d-a956-9307be45284c","Type":"ContainerDied","Data":"8621d7afcc4cbc8292858266e8347e0169760f454c149c13ae640e12a253f69d"} Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.725022 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8621d7afcc4cbc8292858266e8347e0169760f454c149c13ae640e12a253f69d" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.724803 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-482zl" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.821684 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf"] Jan 20 20:12:08 crc kubenswrapper[4948]: E0120 20:12:08.822232 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a4fea5f-1b46-482d-a956-9307be45284c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.822254 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a4fea5f-1b46-482d-a956-9307be45284c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.822431 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a4fea5f-1b46-482d-a956-9307be45284c" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.823141 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.826368 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.826428 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.827187 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.829738 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.842425 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf"] Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.935199 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.935358 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:08 crc kubenswrapper[4948]: I0120 20:12:08.935458 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsn7f\" (UniqueName: \"kubernetes.io/projected/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-kube-api-access-zsn7f\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:09 crc kubenswrapper[4948]: I0120 20:12:09.073270 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:09 crc kubenswrapper[4948]: I0120 20:12:09.073346 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:09 crc kubenswrapper[4948]: I0120 20:12:09.073407 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsn7f\" (UniqueName: \"kubernetes.io/projected/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-kube-api-access-zsn7f\") pod 
\"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:09 crc kubenswrapper[4948]: I0120 20:12:09.079525 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:09 crc kubenswrapper[4948]: I0120 20:12:09.092277 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:09 crc kubenswrapper[4948]: I0120 20:12:09.110371 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsn7f\" (UniqueName: \"kubernetes.io/projected/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-kube-api-access-zsn7f\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-2bxbf\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:09 crc kubenswrapper[4948]: I0120 20:12:09.145841 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:10 crc kubenswrapper[4948]: I0120 20:12:09.702694 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf"] Jan 20 20:12:10 crc kubenswrapper[4948]: I0120 20:12:09.736224 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" event={"ID":"cd1a8ab5-15f0-4194-bb29-4bd56b856c33","Type":"ContainerStarted","Data":"080465ed6da34f8208a8ddb79d2539dfdb8efc4fa76b648504f557ce69016f63"} Jan 20 20:12:10 crc kubenswrapper[4948]: I0120 20:12:10.748515 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" event={"ID":"cd1a8ab5-15f0-4194-bb29-4bd56b856c33","Type":"ContainerStarted","Data":"e9b46285c9693e5934214d2c96b9b079ffddee0a96cd7b2d132875390239ac58"} Jan 20 20:12:10 crc kubenswrapper[4948]: I0120 20:12:10.780171 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" podStartSLOduration=2.353094492 podStartE2EDuration="2.780150357s" podCreationTimestamp="2026-01-20 20:12:08 +0000 UTC" firstStartedPulling="2026-01-20 20:12:09.717225902 +0000 UTC m=+1357.667950871" lastFinishedPulling="2026-01-20 20:12:10.144281767 +0000 UTC m=+1358.095006736" observedRunningTime="2026-01-20 20:12:10.771543464 +0000 UTC m=+1358.722268453" watchObservedRunningTime="2026-01-20 20:12:10.780150357 +0000 UTC m=+1358.730875326" Jan 20 20:12:13 crc kubenswrapper[4948]: I0120 20:12:13.781904 4948 generic.go:334] "Generic (PLEG): container finished" podID="cd1a8ab5-15f0-4194-bb29-4bd56b856c33" containerID="e9b46285c9693e5934214d2c96b9b079ffddee0a96cd7b2d132875390239ac58" exitCode=0 Jan 20 20:12:13 crc kubenswrapper[4948]: I0120 20:12:13.781980 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" event={"ID":"cd1a8ab5-15f0-4194-bb29-4bd56b856c33","Type":"ContainerDied","Data":"e9b46285c9693e5934214d2c96b9b079ffddee0a96cd7b2d132875390239ac58"} Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.236444 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.393151 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsn7f\" (UniqueName: \"kubernetes.io/projected/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-kube-api-access-zsn7f\") pod \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.394103 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-ssh-key-openstack-edpm-ipam\") pod \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.394571 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-inventory\") pod \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\" (UID: \"cd1a8ab5-15f0-4194-bb29-4bd56b856c33\") " Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.399928 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-kube-api-access-zsn7f" (OuterVolumeSpecName: "kube-api-access-zsn7f") pod "cd1a8ab5-15f0-4194-bb29-4bd56b856c33" (UID: "cd1a8ab5-15f0-4194-bb29-4bd56b856c33"). InnerVolumeSpecName "kube-api-access-zsn7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.422821 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "cd1a8ab5-15f0-4194-bb29-4bd56b856c33" (UID: "cd1a8ab5-15f0-4194-bb29-4bd56b856c33"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.429808 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-inventory" (OuterVolumeSpecName: "inventory") pod "cd1a8ab5-15f0-4194-bb29-4bd56b856c33" (UID: "cd1a8ab5-15f0-4194-bb29-4bd56b856c33"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.498104 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.498273 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.498297 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsn7f\" (UniqueName: \"kubernetes.io/projected/cd1a8ab5-15f0-4194-bb29-4bd56b856c33-kube-api-access-zsn7f\") on node \"crc\" DevicePath \"\"" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.811583 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" event={"ID":"cd1a8ab5-15f0-4194-bb29-4bd56b856c33","Type":"ContainerDied","Data":"080465ed6da34f8208a8ddb79d2539dfdb8efc4fa76b648504f557ce69016f63"} Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.811945 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="080465ed6da34f8208a8ddb79d2539dfdb8efc4fa76b648504f557ce69016f63" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.811683 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-2bxbf" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.886280 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn"] Jan 20 20:12:15 crc kubenswrapper[4948]: E0120 20:12:15.886786 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd1a8ab5-15f0-4194-bb29-4bd56b856c33" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.886810 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd1a8ab5-15f0-4194-bb29-4bd56b856c33" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.887063 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd1a8ab5-15f0-4194-bb29-4bd56b856c33" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.887823 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.894512 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.897348 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.898091 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn"] Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.900316 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.900544 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.906680 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.907025 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.907192 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:15 crc kubenswrapper[4948]: I0120 20:12:15.907219 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7rrf\" (UniqueName: \"kubernetes.io/projected/11f8f855-5031-4c77-88c5-07f606419c1f-kube-api-access-l7rrf\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.008549 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.008657 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.008687 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7rrf\" (UniqueName: \"kubernetes.io/projected/11f8f855-5031-4c77-88c5-07f606419c1f-kube-api-access-l7rrf\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.008857 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.014787 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.015183 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.015480 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.027018 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7rrf\" (UniqueName: \"kubernetes.io/projected/11f8f855-5031-4c77-88c5-07f606419c1f-kube-api-access-l7rrf\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.206555 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.779251 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn"] Jan 20 20:12:16 crc kubenswrapper[4948]: I0120 20:12:16.832633 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" event={"ID":"11f8f855-5031-4c77-88c5-07f606419c1f","Type":"ContainerStarted","Data":"5c0b99a99a0239c2882beed44ca36764d3390b904fd39f9e3f033351593bee3b"} Jan 20 20:12:17 crc kubenswrapper[4948]: I0120 20:12:17.842217 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" event={"ID":"11f8f855-5031-4c77-88c5-07f606419c1f","Type":"ContainerStarted","Data":"29bcafe5162380f908606e05b4123f93fcb02c98b477b57de70935e03fe19d4e"} Jan 20 20:12:17 crc kubenswrapper[4948]: I0120 20:12:17.869201 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" podStartSLOduration=2.36092876 podStartE2EDuration="2.869177233s" podCreationTimestamp="2026-01-20 20:12:15 +0000 UTC" firstStartedPulling="2026-01-20 20:12:16.79367351 +0000 UTC m=+1364.744398479" lastFinishedPulling="2026-01-20 20:12:17.301921973 +0000 UTC m=+1365.252646952" observedRunningTime="2026-01-20 20:12:17.858387176 +0000 UTC m=+1365.809112165" watchObservedRunningTime="2026-01-20 20:12:17.869177233 +0000 UTC m=+1365.819902202" Jan 20 20:12:42 crc kubenswrapper[4948]: I0120 20:12:42.918168 4948 scope.go:117] "RemoveContainer" containerID="0b5aaedfab46e66448fad5ad92ee3a5eda8f5f5bd28cf9a0b4321a1439fc928f" Jan 20 20:12:42 crc kubenswrapper[4948]: I0120 20:12:42.942120 4948 scope.go:117] "RemoveContainer" containerID="198ead04e01000671cd4aa517213a35c4ae105bdad71c32c3dc17624585693bc" Jan 20 20:12:42 crc kubenswrapper[4948]: I0120 20:12:42.975339 4948 scope.go:117] "RemoveContainer" containerID="5356317bcc14d3e40adcca640d6e6651c15bbdf7ac8705cb0e9d8e70825a8966" Jan 20 20:14:20 crc kubenswrapper[4948]: I0120 20:14:20.249823 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:14:20 crc kubenswrapper[4948]: I0120 20:14:20.250365 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:14:50 crc kubenswrapper[4948]: I0120 20:14:50.249784 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:14:50 crc kubenswrapper[4948]: I0120 20:14:50.250345 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:14:53 crc kubenswrapper[4948]: I0120 20:14:53.049438 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-4a12-account-create-update-l49lt"] Jan 20 20:14:53 crc kubenswrapper[4948]: I0120 20:14:53.057647 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-4a12-account-create-update-l49lt"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.041423 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-dz2hg"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.052511 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-k8npv"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.067096 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-wfsm8"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.079734 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-1cf5-account-create-update-tjktc"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.093816 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-dz2hg"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.108578 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-b435-account-create-update-fcfpr"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.118761 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-wfsm8"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.132262 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-k8npv"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.144016 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-b435-account-create-update-fcfpr"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.155807 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-1cf5-account-create-update-tjktc"] Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.581560 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d2ae321-a5cb-4018-8899-7de265e16c0f" path="/var/lib/kubelet/pods/0d2ae321-a5cb-4018-8899-7de265e16c0f/volumes" Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.582324 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ce6b227-ed6f-44d8-b9d1-e906bd3457fe" path="/var/lib/kubelet/pods/4ce6b227-ed6f-44d8-b9d1-e906bd3457fe/volumes" Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.582967 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86e10f1b-6bf7-4a69-b49d-b360c73a5a65" path="/var/lib/kubelet/pods/86e10f1b-6bf7-4a69-b49d-b360c73a5a65/volumes" Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.583630 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e7c10dc-5215-41dc-80b4-00bc47be99e8" path="/var/lib/kubelet/pods/8e7c10dc-5215-41dc-80b4-00bc47be99e8/volumes" Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.584901 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3cfb075-5fb9-4769-be33-338ef93623d2" path="/var/lib/kubelet/pods/c3cfb075-5fb9-4769-be33-338ef93623d2/volumes" Jan 20 20:14:54 crc kubenswrapper[4948]: I0120 20:14:54.585735 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc011d48-6711-420d-911f-ffda06687982" 
path="/var/lib/kubelet/pods/dc011d48-6711-420d-911f-ffda06687982/volumes" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.158150 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl"] Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.159643 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.162288 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.162624 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.231957 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl"] Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.328317 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-config-volume\") pod \"collect-profiles-29482335-d94gl\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.328451 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnnbv\" (UniqueName: \"kubernetes.io/projected/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-kube-api-access-tnnbv\") pod \"collect-profiles-29482335-d94gl\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.328629 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-secret-volume\") pod \"collect-profiles-29482335-d94gl\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.430189 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-config-volume\") pod \"collect-profiles-29482335-d94gl\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.430274 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnnbv\" (UniqueName: \"kubernetes.io/projected/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-kube-api-access-tnnbv\") pod \"collect-profiles-29482335-d94gl\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.430408 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-secret-volume\") pod \"collect-profiles-29482335-d94gl\" 
(UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.432666 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-config-volume\") pod \"collect-profiles-29482335-d94gl\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.438577 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-secret-volume\") pod \"collect-profiles-29482335-d94gl\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.453398 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnnbv\" (UniqueName: \"kubernetes.io/projected/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-kube-api-access-tnnbv\") pod \"collect-profiles-29482335-d94gl\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:00 crc kubenswrapper[4948]: I0120 20:15:00.488544 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:01 crc kubenswrapper[4948]: I0120 20:15:01.117379 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl"] Jan 20 20:15:02 crc kubenswrapper[4948]: I0120 20:15:02.082805 4948 generic.go:334] "Generic (PLEG): container finished" podID="41464c5c-9486-4ec9-bb98-ff7d1edf9f29" containerID="487ed09f2dd4026ddbfc4d3d5bc5512ecc7f447a233eedc4cf433bb69cfa10ce" exitCode=0 Jan 20 20:15:02 crc kubenswrapper[4948]: I0120 20:15:02.082920 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" event={"ID":"41464c5c-9486-4ec9-bb98-ff7d1edf9f29","Type":"ContainerDied","Data":"487ed09f2dd4026ddbfc4d3d5bc5512ecc7f447a233eedc4cf433bb69cfa10ce"} Jan 20 20:15:02 crc kubenswrapper[4948]: I0120 20:15:02.083153 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" event={"ID":"41464c5c-9486-4ec9-bb98-ff7d1edf9f29","Type":"ContainerStarted","Data":"2bbb897d443b6cc0337ccd59738b7830dbe107ff37819c77770b6f32d1028f06"} Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.047631 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-spj97"] Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.056917 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-spj97"] Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.468965 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.599474 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-secret-volume\") pod \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.599995 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-config-volume\") pod \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.600124 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnnbv\" (UniqueName: \"kubernetes.io/projected/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-kube-api-access-tnnbv\") pod \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\" (UID: \"41464c5c-9486-4ec9-bb98-ff7d1edf9f29\") " Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.601741 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-config-volume" (OuterVolumeSpecName: "config-volume") pod "41464c5c-9486-4ec9-bb98-ff7d1edf9f29" (UID: "41464c5c-9486-4ec9-bb98-ff7d1edf9f29"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.615325 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "41464c5c-9486-4ec9-bb98-ff7d1edf9f29" (UID: "41464c5c-9486-4ec9-bb98-ff7d1edf9f29"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.635013 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-kube-api-access-tnnbv" (OuterVolumeSpecName: "kube-api-access-tnnbv") pod "41464c5c-9486-4ec9-bb98-ff7d1edf9f29" (UID: "41464c5c-9486-4ec9-bb98-ff7d1edf9f29"). InnerVolumeSpecName "kube-api-access-tnnbv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.702768 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnnbv\" (UniqueName: \"kubernetes.io/projected/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-kube-api-access-tnnbv\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.702821 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:03 crc kubenswrapper[4948]: I0120 20:15:03.702833 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/41464c5c-9486-4ec9-bb98-ff7d1edf9f29-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:04 crc kubenswrapper[4948]: I0120 20:15:04.105411 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" event={"ID":"41464c5c-9486-4ec9-bb98-ff7d1edf9f29","Type":"ContainerDied","Data":"2bbb897d443b6cc0337ccd59738b7830dbe107ff37819c77770b6f32d1028f06"} Jan 20 20:15:04 crc kubenswrapper[4948]: I0120 20:15:04.105471 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl" Jan 20 20:15:04 crc kubenswrapper[4948]: I0120 20:15:04.105477 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2bbb897d443b6cc0337ccd59738b7830dbe107ff37819c77770b6f32d1028f06" Jan 20 20:15:04 crc kubenswrapper[4948]: I0120 20:15:04.582013 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aead4ceb-154b-4822-b17a-46313fc78eaf" path="/var/lib/kubelet/pods/aead4ceb-154b-4822-b17a-46313fc78eaf/volumes" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.479450 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kdnbz"] Jan 20 20:15:13 crc kubenswrapper[4948]: E0120 20:15:13.480390 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41464c5c-9486-4ec9-bb98-ff7d1edf9f29" containerName="collect-profiles" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.480409 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="41464c5c-9486-4ec9-bb98-ff7d1edf9f29" containerName="collect-profiles" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.480623 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="41464c5c-9486-4ec9-bb98-ff7d1edf9f29" containerName="collect-profiles" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.484356 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.524244 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kdnbz"] Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.628320 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-catalog-content\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.628394 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-utilities\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.628452 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8br5\" (UniqueName: \"kubernetes.io/projected/cf507409-8c66-4e70-bcbb-d9882cd01d96-kube-api-access-j8br5\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.730603 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-catalog-content\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.730699 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-utilities\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.730805 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8br5\" (UniqueName: \"kubernetes.io/projected/cf507409-8c66-4e70-bcbb-d9882cd01d96-kube-api-access-j8br5\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.731235 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-catalog-content\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.731517 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-utilities\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.751582 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-j8br5\" (UniqueName: \"kubernetes.io/projected/cf507409-8c66-4e70-bcbb-d9882cd01d96-kube-api-access-j8br5\") pod \"certified-operators-kdnbz\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:13 crc kubenswrapper[4948]: I0120 20:15:13.849032 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:14 crc kubenswrapper[4948]: I0120 20:15:14.384485 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kdnbz"] Jan 20 20:15:15 crc kubenswrapper[4948]: I0120 20:15:15.215923 4948 generic.go:334] "Generic (PLEG): container finished" podID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerID="2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24" exitCode=0 Jan 20 20:15:15 crc kubenswrapper[4948]: I0120 20:15:15.215997 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kdnbz" event={"ID":"cf507409-8c66-4e70-bcbb-d9882cd01d96","Type":"ContainerDied","Data":"2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24"} Jan 20 20:15:15 crc kubenswrapper[4948]: I0120 20:15:15.216330 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kdnbz" event={"ID":"cf507409-8c66-4e70-bcbb-d9882cd01d96","Type":"ContainerStarted","Data":"e9275571f8b381abdd2f72c2c04e06431078859676fbfba980ec619180bf54b1"} Jan 20 20:15:15 crc kubenswrapper[4948]: I0120 20:15:15.219389 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:15:16 crc kubenswrapper[4948]: I0120 20:15:16.226950 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kdnbz" event={"ID":"cf507409-8c66-4e70-bcbb-d9882cd01d96","Type":"ContainerStarted","Data":"7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe"} Jan 20 20:15:18 crc kubenswrapper[4948]: I0120 20:15:18.248098 4948 generic.go:334] "Generic (PLEG): container finished" podID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerID="7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe" exitCode=0 Jan 20 20:15:18 crc kubenswrapper[4948]: I0120 20:15:18.248183 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kdnbz" event={"ID":"cf507409-8c66-4e70-bcbb-d9882cd01d96","Type":"ContainerDied","Data":"7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe"} Jan 20 20:15:19 crc kubenswrapper[4948]: I0120 20:15:19.258531 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kdnbz" event={"ID":"cf507409-8c66-4e70-bcbb-d9882cd01d96","Type":"ContainerStarted","Data":"ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed"} Jan 20 20:15:19 crc kubenswrapper[4948]: I0120 20:15:19.281503 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kdnbz" podStartSLOduration=2.750850138 podStartE2EDuration="6.281459167s" podCreationTimestamp="2026-01-20 20:15:13 +0000 UTC" firstStartedPulling="2026-01-20 20:15:15.219158743 +0000 UTC m=+1543.169883712" lastFinishedPulling="2026-01-20 20:15:18.749767782 +0000 UTC m=+1546.700492741" observedRunningTime="2026-01-20 20:15:19.277135253 +0000 UTC m=+1547.227860222" watchObservedRunningTime="2026-01-20 
20:15:19.281459167 +0000 UTC m=+1547.232184136" Jan 20 20:15:20 crc kubenswrapper[4948]: I0120 20:15:20.249478 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:15:20 crc kubenswrapper[4948]: I0120 20:15:20.249540 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:15:20 crc kubenswrapper[4948]: I0120 20:15:20.249635 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:15:20 crc kubenswrapper[4948]: I0120 20:15:20.250447 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:15:20 crc kubenswrapper[4948]: I0120 20:15:20.250536 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" gracePeriod=600 Jan 20 20:15:20 crc kubenswrapper[4948]: E0120 20:15:20.379008 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:15:21 crc kubenswrapper[4948]: I0120 20:15:21.278537 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" exitCode=0 Jan 20 20:15:21 crc kubenswrapper[4948]: I0120 20:15:21.278618 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"} Jan 20 20:15:21 crc kubenswrapper[4948]: I0120 20:15:21.279113 4948 scope.go:117] "RemoveContainer" containerID="7f6e2109b164e1a5b2cd57afe834ac3fbe85f27835236a7bebdf71bc6a9761ad" Jan 20 20:15:21 crc kubenswrapper[4948]: I0120 20:15:21.279802 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:15:21 crc kubenswrapper[4948]: E0120 20:15:21.280139 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:15:23 crc kubenswrapper[4948]: I0120 20:15:23.849810 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:23 crc kubenswrapper[4948]: I0120 20:15:23.849862 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:23 crc kubenswrapper[4948]: I0120 20:15:23.912381 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:24 crc kubenswrapper[4948]: I0120 20:15:24.352084 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:24 crc kubenswrapper[4948]: I0120 20:15:24.405665 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kdnbz"] Jan 20 20:15:26 crc kubenswrapper[4948]: I0120 20:15:26.322166 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kdnbz" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerName="registry-server" containerID="cri-o://ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed" gracePeriod=2 Jan 20 20:15:26 crc kubenswrapper[4948]: I0120 20:15:26.845170 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:26 crc kubenswrapper[4948]: I0120 20:15:26.996633 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-catalog-content\") pod \"cf507409-8c66-4e70-bcbb-d9882cd01d96\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " Jan 20 20:15:26 crc kubenswrapper[4948]: I0120 20:15:26.996754 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-utilities\") pod \"cf507409-8c66-4e70-bcbb-d9882cd01d96\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " Jan 20 20:15:26 crc kubenswrapper[4948]: I0120 20:15:26.996907 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8br5\" (UniqueName: \"kubernetes.io/projected/cf507409-8c66-4e70-bcbb-d9882cd01d96-kube-api-access-j8br5\") pod \"cf507409-8c66-4e70-bcbb-d9882cd01d96\" (UID: \"cf507409-8c66-4e70-bcbb-d9882cd01d96\") " Jan 20 20:15:26 crc kubenswrapper[4948]: I0120 20:15:26.998691 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-utilities" (OuterVolumeSpecName: "utilities") pod "cf507409-8c66-4e70-bcbb-d9882cd01d96" (UID: "cf507409-8c66-4e70-bcbb-d9882cd01d96"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.027024 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf507409-8c66-4e70-bcbb-d9882cd01d96-kube-api-access-j8br5" (OuterVolumeSpecName: "kube-api-access-j8br5") pod "cf507409-8c66-4e70-bcbb-d9882cd01d96" (UID: "cf507409-8c66-4e70-bcbb-d9882cd01d96"). InnerVolumeSpecName "kube-api-access-j8br5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.068959 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf507409-8c66-4e70-bcbb-d9882cd01d96" (UID: "cf507409-8c66-4e70-bcbb-d9882cd01d96"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.099644 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.099686 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8br5\" (UniqueName: \"kubernetes.io/projected/cf507409-8c66-4e70-bcbb-d9882cd01d96-kube-api-access-j8br5\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.099698 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf507409-8c66-4e70-bcbb-d9882cd01d96-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.332258 4948 generic.go:334] "Generic (PLEG): container finished" podID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerID="ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed" exitCode=0 Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.332303 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kdnbz" event={"ID":"cf507409-8c66-4e70-bcbb-d9882cd01d96","Type":"ContainerDied","Data":"ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed"} Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.332332 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kdnbz" event={"ID":"cf507409-8c66-4e70-bcbb-d9882cd01d96","Type":"ContainerDied","Data":"e9275571f8b381abdd2f72c2c04e06431078859676fbfba980ec619180bf54b1"} Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.332350 4948 scope.go:117] "RemoveContainer" containerID="ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.332350 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kdnbz" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.367879 4948 scope.go:117] "RemoveContainer" containerID="7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.372938 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kdnbz"] Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.382380 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kdnbz"] Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.403203 4948 scope.go:117] "RemoveContainer" containerID="2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.449925 4948 scope.go:117] "RemoveContainer" containerID="ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed" Jan 20 20:15:27 crc kubenswrapper[4948]: E0120 20:15:27.450408 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed\": container with ID starting with ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed not found: ID does not exist" containerID="ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.450458 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed"} err="failed to get container status \"ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed\": rpc error: code = NotFound desc = could not find container \"ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed\": container with ID starting with ec0815e0524bba01b05c41a0cae79ec56211671aa25fa427d17913d1035747ed not found: ID does not exist" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.450488 4948 scope.go:117] "RemoveContainer" containerID="7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe" Jan 20 20:15:27 crc kubenswrapper[4948]: E0120 20:15:27.450982 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe\": container with ID starting with 7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe not found: ID does not exist" containerID="7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.451002 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe"} err="failed to get container status \"7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe\": rpc error: code = NotFound desc = could not find container \"7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe\": container with ID starting with 7a2739f467779549ddca3afa3310c0d7d3c81b2ca40ffe93d1c4ae492869cdbe not found: ID does not exist" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.451015 4948 scope.go:117] "RemoveContainer" containerID="2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24" Jan 20 20:15:27 crc kubenswrapper[4948]: E0120 20:15:27.451250 4948 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24\": container with ID starting with 2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24 not found: ID does not exist" containerID="2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24" Jan 20 20:15:27 crc kubenswrapper[4948]: I0120 20:15:27.451267 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24"} err="failed to get container status \"2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24\": rpc error: code = NotFound desc = could not find container \"2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24\": container with ID starting with 2bb0b7665672e8f9abf89bc4e3154d5b350bf1863e16ea0ac848fc2fafad0a24 not found: ID does not exist" Jan 20 20:15:28 crc kubenswrapper[4948]: I0120 20:15:28.581836 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" path="/var/lib/kubelet/pods/cf507409-8c66-4e70-bcbb-d9882cd01d96/volumes" Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.047226 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-5116-account-create-update-6hrrc"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.057491 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-5116-account-create-update-6hrrc"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.067654 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-ctqgn"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.085821 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-0912-account-create-update-r5z5f"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.102626 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-16db-account-create-update-d7lmx"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.116460 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-0912-account-create-update-r5z5f"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.126260 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-ctqgn"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.137127 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-16db-account-create-update-d7lmx"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.146305 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-qnfsz"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.153932 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-qnfsz"] Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.589574 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01681e12-ad87-49f8-8f36-0631b107e19d" path="/var/lib/kubelet/pods/01681e12-ad87-49f8-8f36-0631b107e19d/volumes" Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.590574 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19434efc-51da-454c-a87d-91bd70e97ad1" path="/var/lib/kubelet/pods/19434efc-51da-454c-a87d-91bd70e97ad1/volumes" Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.591450 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="5b8ef8bb-4baf-4b9e-b47f-e9b082d31759" path="/var/lib/kubelet/pods/5b8ef8bb-4baf-4b9e-b47f-e9b082d31759/volumes" Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.592326 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8665723e-3db4-4331-892a-015554f4c300" path="/var/lib/kubelet/pods/8665723e-3db4-4331-892a-015554f4c300/volumes" Jan 20 20:15:34 crc kubenswrapper[4948]: I0120 20:15:34.594521 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2522fe2-db81-4fae-abeb-e99db7690237" path="/var/lib/kubelet/pods/a2522fe2-db81-4fae-abeb-e99db7690237/volumes" Jan 20 20:15:35 crc kubenswrapper[4948]: I0120 20:15:35.033938 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-7x47d"] Jan 20 20:15:35 crc kubenswrapper[4948]: I0120 20:15:35.046463 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-7x47d"] Jan 20 20:15:35 crc kubenswrapper[4948]: I0120 20:15:35.570153 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:15:35 crc kubenswrapper[4948]: E0120 20:15:35.570430 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.530366 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vltgz"] Jan 20 20:15:36 crc kubenswrapper[4948]: E0120 20:15:36.531341 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerName="extract-utilities" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.531368 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerName="extract-utilities" Jan 20 20:15:36 crc kubenswrapper[4948]: E0120 20:15:36.531394 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerName="registry-server" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.531402 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerName="registry-server" Jan 20 20:15:36 crc kubenswrapper[4948]: E0120 20:15:36.531413 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerName="extract-content" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.531421 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerName="extract-content" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.531746 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf507409-8c66-4e70-bcbb-d9882cd01d96" containerName="registry-server" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.533781 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.543606 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vltgz"] Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.593142 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-utilities\") pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.593240 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5m9j\" (UniqueName: \"kubernetes.io/projected/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-kube-api-access-k5m9j\") pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.593360 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-catalog-content\") pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.618235 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2cf4ce2-6783-421e-9ca3-2bb938815f2f" path="/var/lib/kubelet/pods/d2cf4ce2-6783-421e-9ca3-2bb938815f2f/volumes" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.695063 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-catalog-content\") pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.695130 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-utilities\") pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.695194 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5m9j\" (UniqueName: \"kubernetes.io/projected/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-kube-api-access-k5m9j\") pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.695616 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-catalog-content\") pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.696021 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-utilities\") 
pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.714016 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5m9j\" (UniqueName: \"kubernetes.io/projected/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-kube-api-access-k5m9j\") pod \"redhat-marketplace-vltgz\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:36 crc kubenswrapper[4948]: I0120 20:15:36.869957 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:37 crc kubenswrapper[4948]: I0120 20:15:37.454984 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vltgz"] Jan 20 20:15:37 crc kubenswrapper[4948]: W0120 20:15:37.467890 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44dfb10f_cd3e_4c6f_b3ea_f536d0253873.slice/crio-1a64ad42147ddd7e3a1b8d720a3402ca72698b647ce81a73da9152019d799cef WatchSource:0}: Error finding container 1a64ad42147ddd7e3a1b8d720a3402ca72698b647ce81a73da9152019d799cef: Status 404 returned error can't find the container with id 1a64ad42147ddd7e3a1b8d720a3402ca72698b647ce81a73da9152019d799cef Jan 20 20:15:38 crc kubenswrapper[4948]: I0120 20:15:38.445791 4948 generic.go:334] "Generic (PLEG): container finished" podID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerID="c665eafe162d280d1666cdb47a28c2d60791a4f1cc8d44db07a0a6e2475c5104" exitCode=0 Jan 20 20:15:38 crc kubenswrapper[4948]: I0120 20:15:38.445832 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vltgz" event={"ID":"44dfb10f-cd3e-4c6f-b3ea-f536d0253873","Type":"ContainerDied","Data":"c665eafe162d280d1666cdb47a28c2d60791a4f1cc8d44db07a0a6e2475c5104"} Jan 20 20:15:38 crc kubenswrapper[4948]: I0120 20:15:38.446069 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vltgz" event={"ID":"44dfb10f-cd3e-4c6f-b3ea-f536d0253873","Type":"ContainerStarted","Data":"1a64ad42147ddd7e3a1b8d720a3402ca72698b647ce81a73da9152019d799cef"} Jan 20 20:15:39 crc kubenswrapper[4948]: I0120 20:15:39.459298 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vltgz" event={"ID":"44dfb10f-cd3e-4c6f-b3ea-f536d0253873","Type":"ContainerStarted","Data":"88ac0d3773dd627a0747dec3642d1db5a564ca7b7e09ef4bb9c4f00491d76a8d"} Jan 20 20:15:40 crc kubenswrapper[4948]: I0120 20:15:40.470628 4948 generic.go:334] "Generic (PLEG): container finished" podID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerID="88ac0d3773dd627a0747dec3642d1db5a564ca7b7e09ef4bb9c4f00491d76a8d" exitCode=0 Jan 20 20:15:40 crc kubenswrapper[4948]: I0120 20:15:40.470689 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vltgz" event={"ID":"44dfb10f-cd3e-4c6f-b3ea-f536d0253873","Type":"ContainerDied","Data":"88ac0d3773dd627a0747dec3642d1db5a564ca7b7e09ef4bb9c4f00491d76a8d"} Jan 20 20:15:41 crc kubenswrapper[4948]: I0120 20:15:41.483691 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vltgz" 
event={"ID":"44dfb10f-cd3e-4c6f-b3ea-f536d0253873","Type":"ContainerStarted","Data":"5de11815cb7ca1f9426574150d25fa492b820b4fd6b036d2e83257b655fb0768"} Jan 20 20:15:41 crc kubenswrapper[4948]: I0120 20:15:41.527167 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vltgz" podStartSLOduration=3.043878576 podStartE2EDuration="5.527147095s" podCreationTimestamp="2026-01-20 20:15:36 +0000 UTC" firstStartedPulling="2026-01-20 20:15:38.448010879 +0000 UTC m=+1566.398735848" lastFinishedPulling="2026-01-20 20:15:40.931279398 +0000 UTC m=+1568.882004367" observedRunningTime="2026-01-20 20:15:41.520504925 +0000 UTC m=+1569.471229894" watchObservedRunningTime="2026-01-20 20:15:41.527147095 +0000 UTC m=+1569.477872064" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.115825 4948 scope.go:117] "RemoveContainer" containerID="5d56cd5f8c52843ec4d242cb094fb9fcd3e2b69ba20eedb713be72f2ea4d3d90" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.159408 4948 scope.go:117] "RemoveContainer" containerID="c83e0f39d777297f6e3dc2807a8e05b369b1f4126665bed3026397f23c7a7066" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.201876 4948 scope.go:117] "RemoveContainer" containerID="56cf946b72fd6400f6553e68ff608fc33e326132899c51983ea7068ac01c3a45" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.260311 4948 scope.go:117] "RemoveContainer" containerID="eb6af1732ec62a3656f727a9805834f662bb4918873f2b6262147d59f1b9daec" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.306477 4948 scope.go:117] "RemoveContainer" containerID="c377324355f9239526d0e3fff649587a9f90f4a2f61c332105da841c2a05a87a" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.335802 4948 scope.go:117] "RemoveContainer" containerID="defc9602a3aec24af7b0bcc94383737cda733142f7764368bf590714f79cbedc" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.381344 4948 scope.go:117] "RemoveContainer" containerID="5a68b290623e7026f56160c6093714a427d69ef777dd603d05bfc4bbcc1a68ef" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.403344 4948 scope.go:117] "RemoveContainer" containerID="4d3fb988a1876ed7e13f28cc46ea16777ee911a7ddbf2a6c6561560b10a2a2d7" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.428424 4948 scope.go:117] "RemoveContainer" containerID="ce3bec0a8712e92a4b3d09259b2b9f48aea48bbcb17bba61a24bd447edd4bd71" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.451702 4948 scope.go:117] "RemoveContainer" containerID="c4c10f262615f33b3d0f2b4f178201c8c68bd21518766373085d4d53523b1eae" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.476984 4948 scope.go:117] "RemoveContainer" containerID="11e35f9e35e38f3774a9245fea8df92163ef58a8b0cee8e17f3e329a11eee9a4" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.505128 4948 scope.go:117] "RemoveContainer" containerID="3a3491925eceda3144c2222da6d443c7f8af4a54848aadc137f7c5ff19e4aa48" Jan 20 20:15:43 crc kubenswrapper[4948]: I0120 20:15:43.610208 4948 scope.go:117] "RemoveContainer" containerID="87626e893ab3487cbc6ec1c93cab9ee8078a015e481b31a2490ac8a03a32bc24" Jan 20 20:15:46 crc kubenswrapper[4948]: I0120 20:15:46.871058 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:46 crc kubenswrapper[4948]: I0120 20:15:46.871864 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:46 crc kubenswrapper[4948]: I0120 20:15:46.924226 
4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:47 crc kubenswrapper[4948]: I0120 20:15:47.033984 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-cc7hs"] Jan 20 20:15:47 crc kubenswrapper[4948]: I0120 20:15:47.047032 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-cc7hs"] Jan 20 20:15:47 crc kubenswrapper[4948]: I0120 20:15:47.605137 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:47 crc kubenswrapper[4948]: I0120 20:15:47.654597 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vltgz"] Jan 20 20:15:48 crc kubenswrapper[4948]: I0120 20:15:48.570998 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:15:48 crc kubenswrapper[4948]: E0120 20:15:48.572856 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:15:48 crc kubenswrapper[4948]: I0120 20:15:48.581952 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dd9b1bc-11ee-4556-8c6a-699196c19ec1" path="/var/lib/kubelet/pods/8dd9b1bc-11ee-4556-8c6a-699196c19ec1/volumes" Jan 20 20:15:49 crc kubenswrapper[4948]: I0120 20:15:49.571010 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vltgz" podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerName="registry-server" containerID="cri-o://5de11815cb7ca1f9426574150d25fa492b820b4fd6b036d2e83257b655fb0768" gracePeriod=2 Jan 20 20:15:50 crc kubenswrapper[4948]: I0120 20:15:50.613891 4948 generic.go:334] "Generic (PLEG): container finished" podID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerID="5de11815cb7ca1f9426574150d25fa492b820b4fd6b036d2e83257b655fb0768" exitCode=0 Jan 20 20:15:50 crc kubenswrapper[4948]: I0120 20:15:50.614003 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vltgz" event={"ID":"44dfb10f-cd3e-4c6f-b3ea-f536d0253873","Type":"ContainerDied","Data":"5de11815cb7ca1f9426574150d25fa492b820b4fd6b036d2e83257b655fb0768"} Jan 20 20:15:50 crc kubenswrapper[4948]: I0120 20:15:50.979538 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.098528 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-catalog-content\") pod \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.098660 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5m9j\" (UniqueName: \"kubernetes.io/projected/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-kube-api-access-k5m9j\") pod \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.098743 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-utilities\") pod \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\" (UID: \"44dfb10f-cd3e-4c6f-b3ea-f536d0253873\") " Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.099777 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-utilities" (OuterVolumeSpecName: "utilities") pod "44dfb10f-cd3e-4c6f-b3ea-f536d0253873" (UID: "44dfb10f-cd3e-4c6f-b3ea-f536d0253873"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.105147 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-kube-api-access-k5m9j" (OuterVolumeSpecName: "kube-api-access-k5m9j") pod "44dfb10f-cd3e-4c6f-b3ea-f536d0253873" (UID: "44dfb10f-cd3e-4c6f-b3ea-f536d0253873"). InnerVolumeSpecName "kube-api-access-k5m9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.122447 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "44dfb10f-cd3e-4c6f-b3ea-f536d0253873" (UID: "44dfb10f-cd3e-4c6f-b3ea-f536d0253873"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.210176 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5m9j\" (UniqueName: \"kubernetes.io/projected/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-kube-api-access-k5m9j\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.210213 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.210222 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44dfb10f-cd3e-4c6f-b3ea-f536d0253873-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.629094 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vltgz" event={"ID":"44dfb10f-cd3e-4c6f-b3ea-f536d0253873","Type":"ContainerDied","Data":"1a64ad42147ddd7e3a1b8d720a3402ca72698b647ce81a73da9152019d799cef"} Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.629170 4948 scope.go:117] "RemoveContainer" containerID="5de11815cb7ca1f9426574150d25fa492b820b4fd6b036d2e83257b655fb0768" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.629224 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vltgz" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.666598 4948 scope.go:117] "RemoveContainer" containerID="88ac0d3773dd627a0747dec3642d1db5a564ca7b7e09ef4bb9c4f00491d76a8d" Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.669177 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vltgz"] Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.678132 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vltgz"] Jan 20 20:15:51 crc kubenswrapper[4948]: I0120 20:15:51.688761 4948 scope.go:117] "RemoveContainer" containerID="c665eafe162d280d1666cdb47a28c2d60791a4f1cc8d44db07a0a6e2475c5104" Jan 20 20:15:52 crc kubenswrapper[4948]: I0120 20:15:52.591673 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" path="/var/lib/kubelet/pods/44dfb10f-cd3e-4c6f-b3ea-f536d0253873/volumes" Jan 20 20:15:53 crc kubenswrapper[4948]: I0120 20:15:53.663268 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" event={"ID":"11f8f855-5031-4c77-88c5-07f606419c1f","Type":"ContainerDied","Data":"29bcafe5162380f908606e05b4123f93fcb02c98b477b57de70935e03fe19d4e"} Jan 20 20:15:53 crc kubenswrapper[4948]: I0120 20:15:53.663322 4948 generic.go:334] "Generic (PLEG): container finished" podID="11f8f855-5031-4c77-88c5-07f606419c1f" containerID="29bcafe5162380f908606e05b4123f93fcb02c98b477b57de70935e03fe19d4e" exitCode=0 Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.175493 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.285157 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7rrf\" (UniqueName: \"kubernetes.io/projected/11f8f855-5031-4c77-88c5-07f606419c1f-kube-api-access-l7rrf\") pod \"11f8f855-5031-4c77-88c5-07f606419c1f\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.285240 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-ssh-key-openstack-edpm-ipam\") pod \"11f8f855-5031-4c77-88c5-07f606419c1f\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.286163 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-bootstrap-combined-ca-bundle\") pod \"11f8f855-5031-4c77-88c5-07f606419c1f\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.286245 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-inventory\") pod \"11f8f855-5031-4c77-88c5-07f606419c1f\" (UID: \"11f8f855-5031-4c77-88c5-07f606419c1f\") " Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.291199 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "11f8f855-5031-4c77-88c5-07f606419c1f" (UID: "11f8f855-5031-4c77-88c5-07f606419c1f"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.292044 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11f8f855-5031-4c77-88c5-07f606419c1f-kube-api-access-l7rrf" (OuterVolumeSpecName: "kube-api-access-l7rrf") pod "11f8f855-5031-4c77-88c5-07f606419c1f" (UID: "11f8f855-5031-4c77-88c5-07f606419c1f"). InnerVolumeSpecName "kube-api-access-l7rrf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.318268 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-inventory" (OuterVolumeSpecName: "inventory") pod "11f8f855-5031-4c77-88c5-07f606419c1f" (UID: "11f8f855-5031-4c77-88c5-07f606419c1f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.322000 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "11f8f855-5031-4c77-88c5-07f606419c1f" (UID: "11f8f855-5031-4c77-88c5-07f606419c1f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.388573 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.388611 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7rrf\" (UniqueName: \"kubernetes.io/projected/11f8f855-5031-4c77-88c5-07f606419c1f-kube-api-access-l7rrf\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.388623 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.388633 4948 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11f8f855-5031-4c77-88c5-07f606419c1f-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.683958 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" event={"ID":"11f8f855-5031-4c77-88c5-07f606419c1f","Type":"ContainerDied","Data":"5c0b99a99a0239c2882beed44ca36764d3390b904fd39f9e3f033351593bee3b"} Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.684017 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.684064 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c0b99a99a0239c2882beed44ca36764d3390b904fd39f9e3f033351593bee3b" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.850670 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc"] Jan 20 20:15:55 crc kubenswrapper[4948]: E0120 20:15:55.851286 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11f8f855-5031-4c77-88c5-07f606419c1f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.851301 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="11f8f855-5031-4c77-88c5-07f606419c1f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 20 20:15:55 crc kubenswrapper[4948]: E0120 20:15:55.851323 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerName="extract-content" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.851329 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerName="extract-content" Jan 20 20:15:55 crc kubenswrapper[4948]: E0120 20:15:55.851339 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerName="extract-utilities" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.851345 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerName="extract-utilities" Jan 20 20:15:55 crc kubenswrapper[4948]: E0120 20:15:55.851367 4948 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerName="registry-server" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.851373 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerName="registry-server" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.851543 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="11f8f855-5031-4c77-88c5-07f606419c1f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.851563 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="44dfb10f-cd3e-4c6f-b3ea-f536d0253873" containerName="registry-server" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.852228 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.855871 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.856050 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.856243 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.856431 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.867424 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc"] Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.901436 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bj2fn\" (UniqueName: \"kubernetes.io/projected/bdfde737-ff95-41e6-a124-accfa3f24d58-kube-api-access-bj2fn\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.901639 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:55 crc kubenswrapper[4948]: I0120 20:15:55.901797 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:56 crc kubenswrapper[4948]: I0120 20:15:56.002382 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-ssh-key-openstack-edpm-ipam\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:56 crc kubenswrapper[4948]: I0120 20:15:56.002506 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:56 crc kubenswrapper[4948]: I0120 20:15:56.002554 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bj2fn\" (UniqueName: \"kubernetes.io/projected/bdfde737-ff95-41e6-a124-accfa3f24d58-kube-api-access-bj2fn\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:56 crc kubenswrapper[4948]: I0120 20:15:56.009501 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:56 crc kubenswrapper[4948]: I0120 20:15:56.012328 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:56 crc kubenswrapper[4948]: I0120 20:15:56.026181 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bj2fn\" (UniqueName: \"kubernetes.io/projected/bdfde737-ff95-41e6-a124-accfa3f24d58-kube-api-access-bj2fn\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-x77kc\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:56 crc kubenswrapper[4948]: I0120 20:15:56.170673 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:15:56 crc kubenswrapper[4948]: I0120 20:15:56.797624 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc"] Jan 20 20:15:57 crc kubenswrapper[4948]: I0120 20:15:57.702642 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" event={"ID":"bdfde737-ff95-41e6-a124-accfa3f24d58","Type":"ContainerStarted","Data":"75084f185199bb8bd49249b4fa4a923731ec85c3bc1857bbf0ac8ac801be8ce4"} Jan 20 20:15:58 crc kubenswrapper[4948]: I0120 20:15:58.714476 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" event={"ID":"bdfde737-ff95-41e6-a124-accfa3f24d58","Type":"ContainerStarted","Data":"9dc225cc964424caa31cfa0c84e7431ab44cfcbe8d5d5e217f9ac9018e46e84f"} Jan 20 20:15:58 crc kubenswrapper[4948]: I0120 20:15:58.739439 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" podStartSLOduration=2.221530127 podStartE2EDuration="3.739418705s" podCreationTimestamp="2026-01-20 20:15:55 +0000 UTC" firstStartedPulling="2026-01-20 20:15:56.803585031 +0000 UTC m=+1584.754310010" lastFinishedPulling="2026-01-20 20:15:58.321473619 +0000 UTC m=+1586.272198588" observedRunningTime="2026-01-20 20:15:58.737279584 +0000 UTC m=+1586.688004553" watchObservedRunningTime="2026-01-20 20:15:58.739418705 +0000 UTC m=+1586.690143664" Jan 20 20:16:02 crc kubenswrapper[4948]: I0120 20:16:02.039201 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-fdwn2"] Jan 20 20:16:02 crc kubenswrapper[4948]: I0120 20:16:02.053652 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-fdwn2"] Jan 20 20:16:02 crc kubenswrapper[4948]: I0120 20:16:02.585934 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d96cb8cd-dfa3-4d70-af44-be9627945b5f" path="/var/lib/kubelet/pods/d96cb8cd-dfa3-4d70-af44-be9627945b5f/volumes" Jan 20 20:16:03 crc kubenswrapper[4948]: I0120 20:16:03.571464 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:16:03 crc kubenswrapper[4948]: E0120 20:16:03.572094 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:16:15 crc kubenswrapper[4948]: I0120 20:16:15.571170 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:16:15 crc kubenswrapper[4948]: E0120 20:16:15.572078 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:16:29 
crc kubenswrapper[4948]: I0120 20:16:29.050225 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-5dp57"] Jan 20 20:16:29 crc kubenswrapper[4948]: I0120 20:16:29.059094 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-5dp57"] Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.267230 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5bdff"] Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.274979 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.301655 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5bdff"] Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.404680 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj5cx\" (UniqueName: \"kubernetes.io/projected/d38a590f-e88c-4dd8-8bbf-adf42183b68c-kube-api-access-jj5cx\") pod \"community-operators-5bdff\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.404965 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-utilities\") pod \"community-operators-5bdff\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.408916 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-catalog-content\") pod \"community-operators-5bdff\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.512101 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj5cx\" (UniqueName: \"kubernetes.io/projected/d38a590f-e88c-4dd8-8bbf-adf42183b68c-kube-api-access-jj5cx\") pod \"community-operators-5bdff\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.512835 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-utilities\") pod \"community-operators-5bdff\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.512956 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-catalog-content\") pod \"community-operators-5bdff\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.513823 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-utilities\") pod \"community-operators-5bdff\" (UID: 
\"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.513900 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-catalog-content\") pod \"community-operators-5bdff\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.547995 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jj5cx\" (UniqueName: \"kubernetes.io/projected/d38a590f-e88c-4dd8-8bbf-adf42183b68c-kube-api-access-jj5cx\") pod \"community-operators-5bdff\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") " pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.570801 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:16:30 crc kubenswrapper[4948]: E0120 20:16:30.571297 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.585178 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4d16876-ed2f-4186-801c-48d52e01ac8c" path="/var/lib/kubelet/pods/c4d16876-ed2f-4186-801c-48d52e01ac8c/volumes" Jan 20 20:16:30 crc kubenswrapper[4948]: I0120 20:16:30.607983 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5bdff" Jan 20 20:16:31 crc kubenswrapper[4948]: I0120 20:16:31.245159 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5bdff"] Jan 20 20:16:31 crc kubenswrapper[4948]: W0120 20:16:31.253358 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd38a590f_e88c_4dd8_8bbf_adf42183b68c.slice/crio-e1abf7758c3f95135bd7d65e917292336f04082fcb8b62f641fc95a79919f85e WatchSource:0}: Error finding container e1abf7758c3f95135bd7d65e917292336f04082fcb8b62f641fc95a79919f85e: Status 404 returned error can't find the container with id e1abf7758c3f95135bd7d65e917292336f04082fcb8b62f641fc95a79919f85e Jan 20 20:16:32 crc kubenswrapper[4948]: I0120 20:16:32.089193 4948 generic.go:334] "Generic (PLEG): container finished" podID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerID="c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43" exitCode=0 Jan 20 20:16:32 crc kubenswrapper[4948]: I0120 20:16:32.089240 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bdff" event={"ID":"d38a590f-e88c-4dd8-8bbf-adf42183b68c","Type":"ContainerDied","Data":"c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43"} Jan 20 20:16:32 crc kubenswrapper[4948]: I0120 20:16:32.089451 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bdff" event={"ID":"d38a590f-e88c-4dd8-8bbf-adf42183b68c","Type":"ContainerStarted","Data":"e1abf7758c3f95135bd7d65e917292336f04082fcb8b62f641fc95a79919f85e"} Jan 20 20:16:34 crc kubenswrapper[4948]: I0120 20:16:34.112731 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bdff" event={"ID":"d38a590f-e88c-4dd8-8bbf-adf42183b68c","Type":"ContainerStarted","Data":"059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c"} Jan 20 20:16:35 crc kubenswrapper[4948]: I0120 20:16:35.125931 4948 generic.go:334] "Generic (PLEG): container finished" podID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerID="059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c" exitCode=0 Jan 20 20:16:35 crc kubenswrapper[4948]: I0120 20:16:35.125992 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bdff" event={"ID":"d38a590f-e88c-4dd8-8bbf-adf42183b68c","Type":"ContainerDied","Data":"059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c"} Jan 20 20:16:36 crc kubenswrapper[4948]: I0120 20:16:36.137958 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bdff" event={"ID":"d38a590f-e88c-4dd8-8bbf-adf42183b68c","Type":"ContainerStarted","Data":"e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f"} Jan 20 20:16:36 crc kubenswrapper[4948]: I0120 20:16:36.176053 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5bdff" podStartSLOduration=2.661744311 podStartE2EDuration="6.176032797s" podCreationTimestamp="2026-01-20 20:16:30 +0000 UTC" firstStartedPulling="2026-01-20 20:16:32.091895089 +0000 UTC m=+1620.042620068" lastFinishedPulling="2026-01-20 20:16:35.606183585 +0000 UTC m=+1623.556908554" observedRunningTime="2026-01-20 20:16:36.165425446 +0000 UTC m=+1624.116150445" watchObservedRunningTime="2026-01-20 20:16:36.176032797 +0000 UTC m=+1624.126757776" 
Jan 20 20:16:40 crc kubenswrapper[4948]: I0120 20:16:40.059634 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-99f6n"]
Jan 20 20:16:40 crc kubenswrapper[4948]: I0120 20:16:40.068897 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-99f6n"]
Jan 20 20:16:40 crc kubenswrapper[4948]: I0120 20:16:40.587267 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0fa00dfc-b064-4964-a65d-80809492c96d" path="/var/lib/kubelet/pods/0fa00dfc-b064-4964-a65d-80809492c96d/volumes"
Jan 20 20:16:40 crc kubenswrapper[4948]: I0120 20:16:40.608334 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5bdff"
Jan 20 20:16:40 crc kubenswrapper[4948]: I0120 20:16:40.608412 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5bdff"
Jan 20 20:16:40 crc kubenswrapper[4948]: I0120 20:16:40.666435 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5bdff"
Jan 20 20:16:41 crc kubenswrapper[4948]: I0120 20:16:41.231350 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5bdff"
Jan 20 20:16:41 crc kubenswrapper[4948]: I0120 20:16:41.294124 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5bdff"]
Jan 20 20:16:43 crc kubenswrapper[4948]: I0120 20:16:43.201248 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5bdff" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerName="registry-server" containerID="cri-o://e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f" gracePeriod=2
Jan 20 20:16:43 crc kubenswrapper[4948]: I0120 20:16:43.570463 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"
Jan 20 20:16:43 crc kubenswrapper[4948]: E0120 20:16:43.571430 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:16:43 crc kubenswrapper[4948]: I0120 20:16:43.937874 4948 scope.go:117] "RemoveContainer" containerID="8333bb56024fda1ea6ab2ff9247306ba41ed96b6942899396893d6dba5549a97"
Jan 20 20:16:43 crc kubenswrapper[4948]: I0120 20:16:43.980583 4948 scope.go:117] "RemoveContainer" containerID="21db9b1a1206ebafe6b573d97de0bc3713a5845e199b0d2d20cdcbbab3f1796d"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.051362 4948 scope.go:117] "RemoveContainer" containerID="41b9099addc835da529df8f16b3a0f3f4ac28f84f9ca1ab4cb080c170810471b"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.117474 4948 scope.go:117] "RemoveContainer" containerID="5f03c6d62c705dccc787efee2f93f6e8d2b2f77510a812f0bc73e9f963f47546"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.199088 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5bdff"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.224113 4948 generic.go:334] "Generic (PLEG): container finished" podID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerID="e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f" exitCode=0
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.224174 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bdff" event={"ID":"d38a590f-e88c-4dd8-8bbf-adf42183b68c","Type":"ContainerDied","Data":"e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f"}
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.224202 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bdff" event={"ID":"d38a590f-e88c-4dd8-8bbf-adf42183b68c","Type":"ContainerDied","Data":"e1abf7758c3f95135bd7d65e917292336f04082fcb8b62f641fc95a79919f85e"}
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.224204 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5bdff"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.224228 4948 scope.go:117] "RemoveContainer" containerID="e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.261640 4948 scope.go:117] "RemoveContainer" containerID="059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.329501 4948 scope.go:117] "RemoveContainer" containerID="c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.354671 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jj5cx\" (UniqueName: \"kubernetes.io/projected/d38a590f-e88c-4dd8-8bbf-adf42183b68c-kube-api-access-jj5cx\") pod \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") "
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.354772 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-utilities\") pod \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") "
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.354811 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-catalog-content\") pod \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\" (UID: \"d38a590f-e88c-4dd8-8bbf-adf42183b68c\") "
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.356076 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-utilities" (OuterVolumeSpecName: "utilities") pod "d38a590f-e88c-4dd8-8bbf-adf42183b68c" (UID: "d38a590f-e88c-4dd8-8bbf-adf42183b68c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.361853 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d38a590f-e88c-4dd8-8bbf-adf42183b68c-kube-api-access-jj5cx" (OuterVolumeSpecName: "kube-api-access-jj5cx") pod "d38a590f-e88c-4dd8-8bbf-adf42183b68c" (UID: "d38a590f-e88c-4dd8-8bbf-adf42183b68c"). InnerVolumeSpecName "kube-api-access-jj5cx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.417402 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d38a590f-e88c-4dd8-8bbf-adf42183b68c" (UID: "d38a590f-e88c-4dd8-8bbf-adf42183b68c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.435323 4948 scope.go:117] "RemoveContainer" containerID="e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f"
Jan 20 20:16:44 crc kubenswrapper[4948]: E0120 20:16:44.435942 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f\": container with ID starting with e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f not found: ID does not exist" containerID="e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.436010 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f"} err="failed to get container status \"e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f\": rpc error: code = NotFound desc = could not find container \"e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f\": container with ID starting with e180101050d5cf25981bcb169048f570c303b54a0a004383b056331ae0d7514f not found: ID does not exist"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.436048 4948 scope.go:117] "RemoveContainer" containerID="059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c"
Jan 20 20:16:44 crc kubenswrapper[4948]: E0120 20:16:44.436437 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c\": container with ID starting with 059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c not found: ID does not exist" containerID="059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.436482 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c"} err="failed to get container status \"059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c\": rpc error: code = NotFound desc = could not find container \"059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c\": container with ID starting with 059cadd6544ab1fe8182b2c69bb5c92ea0b6ef0b66a91dd5b6cc3074009bab6c not found: ID does not exist"
Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.436512 4948 scope.go:117] "RemoveContainer" containerID="c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43"
containerID="c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43" Jan 20 20:16:44 crc kubenswrapper[4948]: E0120 20:16:44.436885 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43\": container with ID starting with c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43 not found: ID does not exist" containerID="c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43" Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.436942 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43"} err="failed to get container status \"c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43\": rpc error: code = NotFound desc = could not find container \"c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43\": container with ID starting with c1bf175feef7ff02084263e8398764cbb8d59d87036332cc4015ba640c3fde43 not found: ID does not exist" Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.457902 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jj5cx\" (UniqueName: \"kubernetes.io/projected/d38a590f-e88c-4dd8-8bbf-adf42183b68c-kube-api-access-jj5cx\") on node \"crc\" DevicePath \"\"" Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.457943 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.457952 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d38a590f-e88c-4dd8-8bbf-adf42183b68c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.567991 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5bdff"] Jan 20 20:16:44 crc kubenswrapper[4948]: I0120 20:16:44.580737 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5bdff"] Jan 20 20:16:46 crc kubenswrapper[4948]: I0120 20:16:46.583155 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" path="/var/lib/kubelet/pods/d38a590f-e88c-4dd8-8bbf-adf42183b68c/volumes" Jan 20 20:16:52 crc kubenswrapper[4948]: I0120 20:16:52.036826 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-hx7kj"] Jan 20 20:16:52 crc kubenswrapper[4948]: I0120 20:16:52.045929 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-hx7kj"] Jan 20 20:16:52 crc kubenswrapper[4948]: I0120 20:16:52.582546 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c230d755-993f-4cc4-b387-992589975cc7" path="/var/lib/kubelet/pods/c230d755-993f-4cc4-b387-992589975cc7/volumes" Jan 20 20:16:57 crc kubenswrapper[4948]: I0120 20:16:57.570027 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:16:57 crc kubenswrapper[4948]: E0120 20:16:57.570789 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:17:06 crc kubenswrapper[4948]: I0120 20:17:06.062997 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-qxsld"] Jan 20 20:17:06 crc kubenswrapper[4948]: I0120 20:17:06.079145 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-qxsld"] Jan 20 20:17:06 crc kubenswrapper[4948]: I0120 20:17:06.582910 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a24a241-d8d2-484c-ae7b-436777e1fddd" path="/var/lib/kubelet/pods/4a24a241-d8d2-484c-ae7b-436777e1fddd/volumes" Jan 20 20:17:08 crc kubenswrapper[4948]: I0120 20:17:08.034539 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-dchk5"] Jan 20 20:17:08 crc kubenswrapper[4948]: I0120 20:17:08.044558 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-dchk5"] Jan 20 20:17:08 crc kubenswrapper[4948]: I0120 20:17:08.584913 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="974e456e-61d1-4c5e-a8c9-9ebbb5246848" path="/var/lib/kubelet/pods/974e456e-61d1-4c5e-a8c9-9ebbb5246848/volumes" Jan 20 20:17:11 crc kubenswrapper[4948]: I0120 20:17:11.570271 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:17:11 crc kubenswrapper[4948]: E0120 20:17:11.570820 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:17:26 crc kubenswrapper[4948]: I0120 20:17:26.570201 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:17:26 crc kubenswrapper[4948]: E0120 20:17:26.571345 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:17:37 crc kubenswrapper[4948]: I0120 20:17:37.570775 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:17:37 crc kubenswrapper[4948]: E0120 20:17:37.571469 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:17:44 crc kubenswrapper[4948]: I0120 20:17:44.397841 4948 scope.go:117] "RemoveContainer" 
containerID="5c8cff267eece054abb0bed6f832e21378d67433d0359d0efa0a1e57c0898ede" Jan 20 20:17:44 crc kubenswrapper[4948]: I0120 20:17:44.435278 4948 scope.go:117] "RemoveContainer" containerID="7191cc08b8bfa67d24196060b510b4a9e5eb414c25e910fdb77070f33aa9660b" Jan 20 20:17:44 crc kubenswrapper[4948]: I0120 20:17:44.482304 4948 scope.go:117] "RemoveContainer" containerID="3166fa1c233ed00203e5ec4931b40a183731cb06c32aaa5cb427529ecebc197d" Jan 20 20:17:50 crc kubenswrapper[4948]: I0120 20:17:50.570797 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:17:50 crc kubenswrapper[4948]: E0120 20:17:50.571494 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:17:56 crc kubenswrapper[4948]: I0120 20:17:56.299833 4948 generic.go:334] "Generic (PLEG): container finished" podID="bdfde737-ff95-41e6-a124-accfa3f24d58" containerID="9dc225cc964424caa31cfa0c84e7431ab44cfcbe8d5d5e217f9ac9018e46e84f" exitCode=0 Jan 20 20:17:56 crc kubenswrapper[4948]: I0120 20:17:56.299883 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" event={"ID":"bdfde737-ff95-41e6-a124-accfa3f24d58","Type":"ContainerDied","Data":"9dc225cc964424caa31cfa0c84e7431ab44cfcbe8d5d5e217f9ac9018e46e84f"} Jan 20 20:17:57 crc kubenswrapper[4948]: I0120 20:17:57.754466 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:17:57 crc kubenswrapper[4948]: I0120 20:17:57.902346 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-inventory\") pod \"bdfde737-ff95-41e6-a124-accfa3f24d58\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " Jan 20 20:17:57 crc kubenswrapper[4948]: I0120 20:17:57.902590 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-ssh-key-openstack-edpm-ipam\") pod \"bdfde737-ff95-41e6-a124-accfa3f24d58\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " Jan 20 20:17:57 crc kubenswrapper[4948]: I0120 20:17:57.902691 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bj2fn\" (UniqueName: \"kubernetes.io/projected/bdfde737-ff95-41e6-a124-accfa3f24d58-kube-api-access-bj2fn\") pod \"bdfde737-ff95-41e6-a124-accfa3f24d58\" (UID: \"bdfde737-ff95-41e6-a124-accfa3f24d58\") " Jan 20 20:17:57 crc kubenswrapper[4948]: I0120 20:17:57.908616 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdfde737-ff95-41e6-a124-accfa3f24d58-kube-api-access-bj2fn" (OuterVolumeSpecName: "kube-api-access-bj2fn") pod "bdfde737-ff95-41e6-a124-accfa3f24d58" (UID: "bdfde737-ff95-41e6-a124-accfa3f24d58"). InnerVolumeSpecName "kube-api-access-bj2fn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:17:57 crc kubenswrapper[4948]: I0120 20:17:57.934864 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "bdfde737-ff95-41e6-a124-accfa3f24d58" (UID: "bdfde737-ff95-41e6-a124-accfa3f24d58"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:17:57 crc kubenswrapper[4948]: I0120 20:17:57.936413 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-inventory" (OuterVolumeSpecName: "inventory") pod "bdfde737-ff95-41e6-a124-accfa3f24d58" (UID: "bdfde737-ff95-41e6-a124-accfa3f24d58"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.005187 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.005406 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bdfde737-ff95-41e6-a124-accfa3f24d58-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.005479 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bj2fn\" (UniqueName: \"kubernetes.io/projected/bdfde737-ff95-41e6-a124-accfa3f24d58-kube-api-access-bj2fn\") on node \"crc\" DevicePath \"\"" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.321334 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" event={"ID":"bdfde737-ff95-41e6-a124-accfa3f24d58","Type":"ContainerDied","Data":"75084f185199bb8bd49249b4fa4a923731ec85c3bc1857bbf0ac8ac801be8ce4"} Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.321690 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75084f185199bb8bd49249b4fa4a923731ec85c3bc1857bbf0ac8ac801be8ce4" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.321402 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-x77kc" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.418296 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv"] Jan 20 20:17:58 crc kubenswrapper[4948]: E0120 20:17:58.418881 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerName="extract-content" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.418906 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerName="extract-content" Jan 20 20:17:58 crc kubenswrapper[4948]: E0120 20:17:58.418923 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdfde737-ff95-41e6-a124-accfa3f24d58" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.418932 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdfde737-ff95-41e6-a124-accfa3f24d58" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 20 20:17:58 crc kubenswrapper[4948]: E0120 20:17:58.418950 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerName="registry-server" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.418958 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerName="registry-server" Jan 20 20:17:58 crc kubenswrapper[4948]: E0120 20:17:58.418983 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerName="extract-utilities" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.418992 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerName="extract-utilities" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.419236 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdfde737-ff95-41e6-a124-accfa3f24d58" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.419257 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d38a590f-e88c-4dd8-8bbf-adf42183b68c" containerName="registry-server" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.420111 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.422757 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.422848 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.425129 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.425313 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.435531 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv"] Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.514192 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.514310 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.514364 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlcnb\" (UniqueName: \"kubernetes.io/projected/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-kube-api-access-vlcnb\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.678009 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.678324 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.678411 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlcnb\" (UniqueName: 
\"kubernetes.io/projected/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-kube-api-access-vlcnb\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.688172 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.689468 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.702783 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlcnb\" (UniqueName: \"kubernetes.io/projected/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-kube-api-access-vlcnb\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-52fgv\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:58 crc kubenswrapper[4948]: I0120 20:17:58.746948 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:17:59 crc kubenswrapper[4948]: I0120 20:17:59.054811 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-qlvzm"] Jan 20 20:17:59 crc kubenswrapper[4948]: I0120 20:17:59.065327 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-qlvzm"] Jan 20 20:17:59 crc kubenswrapper[4948]: I0120 20:17:59.302483 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv"] Jan 20 20:17:59 crc kubenswrapper[4948]: I0120 20:17:59.339168 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" event={"ID":"88dba5f2-ff1f-420f-a1cf-e78fd5512d44","Type":"ContainerStarted","Data":"0503fc7ff672d131d041d54664facc811c90c882afdf365d4db2aa4ff4dc017a"} Jan 20 20:18:00 crc kubenswrapper[4948]: I0120 20:18:00.042897 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-pzp8p"] Jan 20 20:18:00 crc kubenswrapper[4948]: I0120 20:18:00.051879 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-pzp8p"] Jan 20 20:18:00 crc kubenswrapper[4948]: I0120 20:18:00.590812 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69739aba-0e18-493d-9957-8b215b4a2eef" path="/var/lib/kubelet/pods/69739aba-0e18-493d-9957-8b215b4a2eef/volumes" Jan 20 20:18:00 crc kubenswrapper[4948]: I0120 20:18:00.592750 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f66c168c-985d-43b6-a53d-5613b7a416cc" path="/var/lib/kubelet/pods/f66c168c-985d-43b6-a53d-5613b7a416cc/volumes" Jan 20 20:18:01 crc 
Jan 20 20:18:01 crc kubenswrapper[4948]: I0120 20:18:01.042165 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-r724g"]
Jan 20 20:18:01 crc kubenswrapper[4948]: I0120 20:18:01.362461 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" event={"ID":"88dba5f2-ff1f-420f-a1cf-e78fd5512d44","Type":"ContainerStarted","Data":"1b0a515dd8429af42490a6dd991be0fcbbfa14b1d65b7601e50d2d6de1918ab6"}
Jan 20 20:18:01 crc kubenswrapper[4948]: I0120 20:18:01.394474 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" podStartSLOduration=2.345251024 podStartE2EDuration="3.394440881s" podCreationTimestamp="2026-01-20 20:17:58 +0000 UTC" firstStartedPulling="2026-01-20 20:17:59.313736369 +0000 UTC m=+1707.264461338" lastFinishedPulling="2026-01-20 20:18:00.362926226 +0000 UTC m=+1708.313651195" observedRunningTime="2026-01-20 20:18:01.383636913 +0000 UTC m=+1709.334361882" watchObservedRunningTime="2026-01-20 20:18:01.394440881 +0000 UTC m=+1709.345165850"
Jan 20 20:18:01 crc kubenswrapper[4948]: I0120 20:18:01.570479 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"
Jan 20 20:18:01 crc kubenswrapper[4948]: E0120 20:18:01.570914 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.053153 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-7ec1-account-create-update-269qf"]
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.063582 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-101b-account-create-update-b8krk"]
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.077276 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-28d2-account-create-update-qsqf8"]
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.085859 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-28d2-account-create-update-qsqf8"]
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.093598 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-101b-account-create-update-b8krk"]
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.101208 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7ec1-account-create-update-269qf"]
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.582111 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c5d2212-ff64-4cb5-964a-0fa269bb0249" path="/var/lib/kubelet/pods/2c5d2212-ff64-4cb5-964a-0fa269bb0249/volumes"
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.582856 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d91976f-4b13-453d-8ee1-9614f4d23edc" path="/var/lib/kubelet/pods/4d91976f-4b13-453d-8ee1-9614f4d23edc/volumes"
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.583507 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51e4eded-1818-4696-a425-227ce9bb1750" path="/var/lib/kubelet/pods/51e4eded-1818-4696-a425-227ce9bb1750/volumes"
Jan 20 20:18:02 crc kubenswrapper[4948]: I0120 20:18:02.584960 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd73c9ec-8283-44a3-8a72-2fc52180b2df" path="/var/lib/kubelet/pods/bd73c9ec-8283-44a3-8a72-2fc52180b2df/volumes"
Jan 20 20:18:13 crc kubenswrapper[4948]: I0120 20:18:13.570920 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"
Jan 20 20:18:13 crc kubenswrapper[4948]: E0120 20:18:13.572123 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:18:26 crc kubenswrapper[4948]: I0120 20:18:26.570384 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"
Jan 20 20:18:26 crc kubenswrapper[4948]: E0120 20:18:26.571213 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:18:41 crc kubenswrapper[4948]: I0120 20:18:41.570219 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"
Jan 20 20:18:41 crc kubenswrapper[4948]: E0120 20:18:41.571050 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:18:44 crc kubenswrapper[4948]: I0120 20:18:44.627019 4948 scope.go:117] "RemoveContainer" containerID="64bc5b2f28dc731eea9464efc9ec35063f827c5a359f7460c5a50500a4c00e18"
Jan 20 20:18:44 crc kubenswrapper[4948]: I0120 20:18:44.664955 4948 scope.go:117] "RemoveContainer" containerID="b0c4c89ef8600cc8cabc0c67c87b43a956cda83db560c7c6a4d4c13a84142005"
Jan 20 20:18:44 crc kubenswrapper[4948]: I0120 20:18:44.733272 4948 scope.go:117] "RemoveContainer" containerID="08f8ffc93fe751bf13d32f5e10ca0e9ec3390d312d570a3611411ea83a128832"
Jan 20 20:18:44 crc kubenswrapper[4948]: I0120 20:18:44.782502 4948 scope.go:117] "RemoveContainer" containerID="d6c35c80791bf13765cbe351ab6738d7a45606c31086bc37aee4022510099afa"
Jan 20 20:18:44 crc kubenswrapper[4948]: I0120 20:18:44.834672 4948 scope.go:117] "RemoveContainer" containerID="f842760f17310ee306f18fd6c7dfc7b6c6450b6e940d2118cde72af473823627"
Jan 20 20:18:44 crc kubenswrapper[4948]: I0120 20:18:44.904249 4948 scope.go:117] "RemoveContainer" containerID="bce482f8eeeb13a5700a2d2b6a3fc1857951c48729aaba23b374e3ce5522de1d"
Jan 20 20:18:54 crc kubenswrapper[4948]: I0120 20:18:54.570975 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"
Jan 20 20:18:54 crc kubenswrapper[4948]: E0120 20:18:54.571832 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:18:57 crc kubenswrapper[4948]: I0120 20:18:57.038040 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xpn28"]
Jan 20 20:18:57 crc kubenswrapper[4948]: I0120 20:18:57.049943 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-xpn28"]
Jan 20 20:18:58 crc kubenswrapper[4948]: I0120 20:18:58.582730 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6bba308-c57f-4e3a-a2d8-1efb3f1d1844" path="/var/lib/kubelet/pods/b6bba308-c57f-4e3a-a2d8-1efb3f1d1844/volumes"
Jan 20 20:19:06 crc kubenswrapper[4948]: I0120 20:19:06.570901 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"
Jan 20 20:19:06 crc kubenswrapper[4948]: E0120 20:19:06.571737 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:19:21 crc kubenswrapper[4948]: I0120 20:19:21.571133 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f"
Jan 20 20:19:21 crc kubenswrapper[4948]: E0120 20:19:21.572033 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:19:25 crc kubenswrapper[4948]: I0120 20:19:25.175673 4948 generic.go:334] "Generic (PLEG): container finished" podID="88dba5f2-ff1f-420f-a1cf-e78fd5512d44" containerID="1b0a515dd8429af42490a6dd991be0fcbbfa14b1d65b7601e50d2d6de1918ab6" exitCode=0
Jan 20 20:19:25 crc kubenswrapper[4948]: I0120 20:19:25.175773 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" event={"ID":"88dba5f2-ff1f-420f-a1cf-e78fd5512d44","Type":"ContainerDied","Data":"1b0a515dd8429af42490a6dd991be0fcbbfa14b1d65b7601e50d2d6de1918ab6"}
Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.615818 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv"
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.713812 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-inventory\") pod \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.713881 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-ssh-key-openstack-edpm-ipam\") pod \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.714066 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlcnb\" (UniqueName: \"kubernetes.io/projected/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-kube-api-access-vlcnb\") pod \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\" (UID: \"88dba5f2-ff1f-420f-a1cf-e78fd5512d44\") " Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.724404 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-kube-api-access-vlcnb" (OuterVolumeSpecName: "kube-api-access-vlcnb") pod "88dba5f2-ff1f-420f-a1cf-e78fd5512d44" (UID: "88dba5f2-ff1f-420f-a1cf-e78fd5512d44"). InnerVolumeSpecName "kube-api-access-vlcnb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.745387 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-inventory" (OuterVolumeSpecName: "inventory") pod "88dba5f2-ff1f-420f-a1cf-e78fd5512d44" (UID: "88dba5f2-ff1f-420f-a1cf-e78fd5512d44"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.763979 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "88dba5f2-ff1f-420f-a1cf-e78fd5512d44" (UID: "88dba5f2-ff1f-420f-a1cf-e78fd5512d44"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.816404 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlcnb\" (UniqueName: \"kubernetes.io/projected/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-kube-api-access-vlcnb\") on node \"crc\" DevicePath \"\"" Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.816436 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:19:26 crc kubenswrapper[4948]: I0120 20:19:26.816446 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88dba5f2-ff1f-420f-a1cf-e78fd5512d44-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.073767 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-rxl64"] Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.083591 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-5x5w6"] Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.091688 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-rxl64"] Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.136926 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-5x5w6"] Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.194426 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" event={"ID":"88dba5f2-ff1f-420f-a1cf-e78fd5512d44","Type":"ContainerDied","Data":"0503fc7ff672d131d041d54664facc811c90c882afdf365d4db2aa4ff4dc017a"} Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.194480 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0503fc7ff672d131d041d54664facc811c90c882afdf365d4db2aa4ff4dc017a" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.194583 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-52fgv" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.307694 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg"] Jan 20 20:19:27 crc kubenswrapper[4948]: E0120 20:19:27.308317 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88dba5f2-ff1f-420f-a1cf-e78fd5512d44" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.308347 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="88dba5f2-ff1f-420f-a1cf-e78fd5512d44" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.308679 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="88dba5f2-ff1f-420f-a1cf-e78fd5512d44" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.309540 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.315312 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.315651 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.315866 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.318560 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.321955 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg"] Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.434038 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.434110 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.434156 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnmlt\" (UniqueName: \"kubernetes.io/projected/ada055ea-6aa5-4e75-ad5b-4caec7647608-kube-api-access-dnmlt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.535759 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.535863 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.535896 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnmlt\" (UniqueName: 
\"kubernetes.io/projected/ada055ea-6aa5-4e75-ad5b-4caec7647608-kube-api-access-dnmlt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.546670 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.549547 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.555942 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnmlt\" (UniqueName: \"kubernetes.io/projected/ada055ea-6aa5-4e75-ad5b-4caec7647608-kube-api-access-dnmlt\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:27 crc kubenswrapper[4948]: I0120 20:19:27.627163 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:28 crc kubenswrapper[4948]: I0120 20:19:28.312595 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg"] Jan 20 20:19:28 crc kubenswrapper[4948]: I0120 20:19:28.582761 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f3d8a46-101e-416b-b8c7-84c53794528e" path="/var/lib/kubelet/pods/6f3d8a46-101e-416b-b8c7-84c53794528e/volumes" Jan 20 20:19:28 crc kubenswrapper[4948]: I0120 20:19:28.583390 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f" path="/var/lib/kubelet/pods/aaf75ea4-52b5-4f20-ab4e-5edd5d86c03f/volumes" Jan 20 20:19:29 crc kubenswrapper[4948]: I0120 20:19:29.246882 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" event={"ID":"ada055ea-6aa5-4e75-ad5b-4caec7647608","Type":"ContainerStarted","Data":"f14007d7d7648009f2f0dedb262370ef75420716bd6734ac0807587222896ec9"} Jan 20 20:19:29 crc kubenswrapper[4948]: I0120 20:19:29.247249 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" event={"ID":"ada055ea-6aa5-4e75-ad5b-4caec7647608","Type":"ContainerStarted","Data":"83ab4849eda575884284f1cfd29806976c9b6101af5edd59e5b70f3ca4cb99a4"} Jan 20 20:19:29 crc kubenswrapper[4948]: I0120 20:19:29.270687 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" podStartSLOduration=1.8061842719999999 podStartE2EDuration="2.270655917s" podCreationTimestamp="2026-01-20 20:19:27 
+0000 UTC" firstStartedPulling="2026-01-20 20:19:28.319079475 +0000 UTC m=+1796.269804444" lastFinishedPulling="2026-01-20 20:19:28.78355112 +0000 UTC m=+1796.734276089" observedRunningTime="2026-01-20 20:19:29.263203413 +0000 UTC m=+1797.213928382" watchObservedRunningTime="2026-01-20 20:19:29.270655917 +0000 UTC m=+1797.221380886" Jan 20 20:19:35 crc kubenswrapper[4948]: I0120 20:19:35.300334 4948 generic.go:334] "Generic (PLEG): container finished" podID="ada055ea-6aa5-4e75-ad5b-4caec7647608" containerID="f14007d7d7648009f2f0dedb262370ef75420716bd6734ac0807587222896ec9" exitCode=0 Jan 20 20:19:35 crc kubenswrapper[4948]: I0120 20:19:35.300446 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" event={"ID":"ada055ea-6aa5-4e75-ad5b-4caec7647608","Type":"ContainerDied","Data":"f14007d7d7648009f2f0dedb262370ef75420716bd6734ac0807587222896ec9"} Jan 20 20:19:35 crc kubenswrapper[4948]: I0120 20:19:35.570259 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:19:35 crc kubenswrapper[4948]: E0120 20:19:35.570779 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:19:36 crc kubenswrapper[4948]: I0120 20:19:36.857221 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:36 crc kubenswrapper[4948]: I0120 20:19:36.970953 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-inventory\") pod \"ada055ea-6aa5-4e75-ad5b-4caec7647608\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " Jan 20 20:19:36 crc kubenswrapper[4948]: I0120 20:19:36.971146 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnmlt\" (UniqueName: \"kubernetes.io/projected/ada055ea-6aa5-4e75-ad5b-4caec7647608-kube-api-access-dnmlt\") pod \"ada055ea-6aa5-4e75-ad5b-4caec7647608\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " Jan 20 20:19:36 crc kubenswrapper[4948]: I0120 20:19:36.971181 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-ssh-key-openstack-edpm-ipam\") pod \"ada055ea-6aa5-4e75-ad5b-4caec7647608\" (UID: \"ada055ea-6aa5-4e75-ad5b-4caec7647608\") " Jan 20 20:19:36 crc kubenswrapper[4948]: I0120 20:19:36.979863 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ada055ea-6aa5-4e75-ad5b-4caec7647608-kube-api-access-dnmlt" (OuterVolumeSpecName: "kube-api-access-dnmlt") pod "ada055ea-6aa5-4e75-ad5b-4caec7647608" (UID: "ada055ea-6aa5-4e75-ad5b-4caec7647608"). InnerVolumeSpecName "kube-api-access-dnmlt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.000513 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ada055ea-6aa5-4e75-ad5b-4caec7647608" (UID: "ada055ea-6aa5-4e75-ad5b-4caec7647608"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.002142 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-inventory" (OuterVolumeSpecName: "inventory") pod "ada055ea-6aa5-4e75-ad5b-4caec7647608" (UID: "ada055ea-6aa5-4e75-ad5b-4caec7647608"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.073608 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.074451 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnmlt\" (UniqueName: \"kubernetes.io/projected/ada055ea-6aa5-4e75-ad5b-4caec7647608-kube-api-access-dnmlt\") on node \"crc\" DevicePath \"\"" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.074577 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ada055ea-6aa5-4e75-ad5b-4caec7647608-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.320643 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" event={"ID":"ada055ea-6aa5-4e75-ad5b-4caec7647608","Type":"ContainerDied","Data":"83ab4849eda575884284f1cfd29806976c9b6101af5edd59e5b70f3ca4cb99a4"} Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.321713 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83ab4849eda575884284f1cfd29806976c9b6101af5edd59e5b70f3ca4cb99a4" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.320763 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.432974 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp"] Jan 20 20:19:37 crc kubenswrapper[4948]: E0120 20:19:37.433805 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ada055ea-6aa5-4e75-ad5b-4caec7647608" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.433834 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ada055ea-6aa5-4e75-ad5b-4caec7647608" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.434095 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ada055ea-6aa5-4e75-ad5b-4caec7647608" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.434816 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.450124 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.454315 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.459018 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.471478 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp"] Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.477513 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.594608 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.594697 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.594912 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd64l\" (UniqueName: \"kubernetes.io/projected/a036dc78-f9f1-467a-b272-a45b9280bc99-kube-api-access-pd64l\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 
20:19:37.699645 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.699723 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.699766 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd64l\" (UniqueName: \"kubernetes.io/projected/a036dc78-f9f1-467a-b272-a45b9280bc99-kube-api-access-pd64l\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.710681 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.711149 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.746784 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd64l\" (UniqueName: \"kubernetes.io/projected/a036dc78-f9f1-467a-b272-a45b9280bc99-kube-api-access-pd64l\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-gbbgp\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:37 crc kubenswrapper[4948]: I0120 20:19:37.753971 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:19:38 crc kubenswrapper[4948]: I0120 20:19:38.157009 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp"] Jan 20 20:19:38 crc kubenswrapper[4948]: I0120 20:19:38.328696 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" event={"ID":"a036dc78-f9f1-467a-b272-a45b9280bc99","Type":"ContainerStarted","Data":"c80822ab8d580cb977fe1cd0c66a2e4bea69651f1b1e2ae5fad51a1bf2e6b847"} Jan 20 20:19:39 crc kubenswrapper[4948]: I0120 20:19:39.354948 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" event={"ID":"a036dc78-f9f1-467a-b272-a45b9280bc99","Type":"ContainerStarted","Data":"e67c0e40114fd04b6b0c4c7e99f8486cd5829505e33d2f46d86f12db7df22bcd"} Jan 20 20:19:39 crc kubenswrapper[4948]: I0120 20:19:39.384906 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" podStartSLOduration=1.6265262759999999 podStartE2EDuration="2.384886087s" podCreationTimestamp="2026-01-20 20:19:37 +0000 UTC" firstStartedPulling="2026-01-20 20:19:38.162233466 +0000 UTC m=+1806.112958445" lastFinishedPulling="2026-01-20 20:19:38.920593287 +0000 UTC m=+1806.871318256" observedRunningTime="2026-01-20 20:19:39.372934584 +0000 UTC m=+1807.323659553" watchObservedRunningTime="2026-01-20 20:19:39.384886087 +0000 UTC m=+1807.335611056" Jan 20 20:19:45 crc kubenswrapper[4948]: I0120 20:19:45.046614 4948 scope.go:117] "RemoveContainer" containerID="eae9735274d1023e219135a04831bdb15fd72c95cdabbd5a07697e6e6c1a4d16" Jan 20 20:19:45 crc kubenswrapper[4948]: I0120 20:19:45.103310 4948 scope.go:117] "RemoveContainer" containerID="3f11b7d6bf5df6c7dddeebe09c92747c57004301c58997190821908a6fc80272" Jan 20 20:19:45 crc kubenswrapper[4948]: I0120 20:19:45.136849 4948 scope.go:117] "RemoveContainer" containerID="d8039a951a0ffd31640fcbfc7fc01adead996729f2091892336370630606b900" Jan 20 20:19:50 crc kubenswrapper[4948]: I0120 20:19:50.570409 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:19:50 crc kubenswrapper[4948]: E0120 20:19:50.572527 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:20:03 crc kubenswrapper[4948]: I0120 20:20:03.571051 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:20:03 crc kubenswrapper[4948]: E0120 20:20:03.573386 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:20:11 crc kubenswrapper[4948]: I0120 
20:20:11.048118 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-gfmgp"] Jan 20 20:20:11 crc kubenswrapper[4948]: I0120 20:20:11.059303 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-gfmgp"] Jan 20 20:20:12 crc kubenswrapper[4948]: I0120 20:20:12.582869 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d2feaec-203c-425a-86bf-c7681f07bafd" path="/var/lib/kubelet/pods/5d2feaec-203c-425a-86bf-c7681f07bafd/volumes" Jan 20 20:20:15 crc kubenswrapper[4948]: I0120 20:20:15.570208 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:20:15 crc kubenswrapper[4948]: E0120 20:20:15.570729 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:20:21 crc kubenswrapper[4948]: I0120 20:20:21.716924 4948 generic.go:334] "Generic (PLEG): container finished" podID="a036dc78-f9f1-467a-b272-a45b9280bc99" containerID="e67c0e40114fd04b6b0c4c7e99f8486cd5829505e33d2f46d86f12db7df22bcd" exitCode=0 Jan 20 20:20:21 crc kubenswrapper[4948]: I0120 20:20:21.717026 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" event={"ID":"a036dc78-f9f1-467a-b272-a45b9280bc99","Type":"ContainerDied","Data":"e67c0e40114fd04b6b0c4c7e99f8486cd5829505e33d2f46d86f12db7df22bcd"} Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.136722 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.272098 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-ssh-key-openstack-edpm-ipam\") pod \"a036dc78-f9f1-467a-b272-a45b9280bc99\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.272656 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pd64l\" (UniqueName: \"kubernetes.io/projected/a036dc78-f9f1-467a-b272-a45b9280bc99-kube-api-access-pd64l\") pod \"a036dc78-f9f1-467a-b272-a45b9280bc99\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.273025 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-inventory\") pod \"a036dc78-f9f1-467a-b272-a45b9280bc99\" (UID: \"a036dc78-f9f1-467a-b272-a45b9280bc99\") " Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.282583 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a036dc78-f9f1-467a-b272-a45b9280bc99-kube-api-access-pd64l" (OuterVolumeSpecName: "kube-api-access-pd64l") pod "a036dc78-f9f1-467a-b272-a45b9280bc99" (UID: "a036dc78-f9f1-467a-b272-a45b9280bc99"). InnerVolumeSpecName "kube-api-access-pd64l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.299479 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a036dc78-f9f1-467a-b272-a45b9280bc99" (UID: "a036dc78-f9f1-467a-b272-a45b9280bc99"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.308178 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-inventory" (OuterVolumeSpecName: "inventory") pod "a036dc78-f9f1-467a-b272-a45b9280bc99" (UID: "a036dc78-f9f1-467a-b272-a45b9280bc99"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.376945 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.376997 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pd64l\" (UniqueName: \"kubernetes.io/projected/a036dc78-f9f1-467a-b272-a45b9280bc99-kube-api-access-pd64l\") on node \"crc\" DevicePath \"\"" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.377020 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a036dc78-f9f1-467a-b272-a45b9280bc99-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.735248 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" event={"ID":"a036dc78-f9f1-467a-b272-a45b9280bc99","Type":"ContainerDied","Data":"c80822ab8d580cb977fe1cd0c66a2e4bea69651f1b1e2ae5fad51a1bf2e6b847"} Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.735784 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c80822ab8d580cb977fe1cd0c66a2e4bea69651f1b1e2ae5fad51a1bf2e6b847" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.735288 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-gbbgp" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.845243 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g"] Jan 20 20:20:23 crc kubenswrapper[4948]: E0120 20:20:23.845640 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a036dc78-f9f1-467a-b272-a45b9280bc99" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.845658 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a036dc78-f9f1-467a-b272-a45b9280bc99" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.845843 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="a036dc78-f9f1-467a-b272-a45b9280bc99" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.846551 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.848680 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.848940 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.852689 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.858315 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.861828 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g"] Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.988679 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.988753 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:23 crc kubenswrapper[4948]: I0120 20:20:23.988783 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcw2v\" (UniqueName: \"kubernetes.io/projected/c43c5ed8-ee74-481a-9b89-30845f8380b8-kube-api-access-bcw2v\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.090674 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.090774 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.090809 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcw2v\" (UniqueName: 
\"kubernetes.io/projected/c43c5ed8-ee74-481a-9b89-30845f8380b8-kube-api-access-bcw2v\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.099333 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.108573 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.111831 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcw2v\" (UniqueName: \"kubernetes.io/projected/c43c5ed8-ee74-481a-9b89-30845f8380b8-kube-api-access-bcw2v\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-2446g\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.164519 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.735356 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.736383 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g"] Jan 20 20:20:24 crc kubenswrapper[4948]: I0120 20:20:24.749544 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" event={"ID":"c43c5ed8-ee74-481a-9b89-30845f8380b8","Type":"ContainerStarted","Data":"e71d939b79b3c628a506645d8887f527a35783776fa0b7336129e2c1795988b4"} Jan 20 20:20:25 crc kubenswrapper[4948]: I0120 20:20:25.757968 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" event={"ID":"c43c5ed8-ee74-481a-9b89-30845f8380b8","Type":"ContainerStarted","Data":"c31763a6fba6016aeaceafcc88449d55eb4e1fcb16a631104322129684eaac03"} Jan 20 20:20:26 crc kubenswrapper[4948]: I0120 20:20:26.785864 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" podStartSLOduration=3.061735683 podStartE2EDuration="3.785842819s" podCreationTimestamp="2026-01-20 20:20:23 +0000 UTC" firstStartedPulling="2026-01-20 20:20:24.735080548 +0000 UTC m=+1852.685805517" lastFinishedPulling="2026-01-20 20:20:25.459187684 +0000 UTC m=+1853.409912653" observedRunningTime="2026-01-20 20:20:26.780555057 +0000 UTC m=+1854.731280026" watchObservedRunningTime="2026-01-20 20:20:26.785842819 +0000 UTC m=+1854.736567788" Jan 20 20:20:28 crc kubenswrapper[4948]: I0120 
20:20:28.570494 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:20:29 crc kubenswrapper[4948]: I0120 20:20:29.795857 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"5cbb7c8430f6645757313c4d6b374566eb7331d9daa136806f9655de7ed9b678"} Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.100945 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6kpr9"] Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.104908 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.113564 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6kpr9"] Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.172330 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-utilities\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.172723 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7g7r\" (UniqueName: \"kubernetes.io/projected/d2afbffb-2711-4130-9949-9e1a30f3cb84-kube-api-access-d7g7r\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.172883 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-catalog-content\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.275359 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-utilities\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.275448 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7g7r\" (UniqueName: \"kubernetes.io/projected/d2afbffb-2711-4130-9949-9e1a30f3cb84-kube-api-access-d7g7r\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.275482 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-catalog-content\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.276086 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-catalog-content\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.276213 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-utilities\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.294968 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7g7r\" (UniqueName: \"kubernetes.io/projected/d2afbffb-2711-4130-9949-9e1a30f3cb84-kube-api-access-d7g7r\") pod \"redhat-operators-6kpr9\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.435454 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:36 crc kubenswrapper[4948]: I0120 20:20:36.948839 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6kpr9"] Jan 20 20:20:37 crc kubenswrapper[4948]: I0120 20:20:37.866323 4948 generic.go:334] "Generic (PLEG): container finished" podID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerID="e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf" exitCode=0 Jan 20 20:20:37 crc kubenswrapper[4948]: I0120 20:20:37.866434 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kpr9" event={"ID":"d2afbffb-2711-4130-9949-9e1a30f3cb84","Type":"ContainerDied","Data":"e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf"} Jan 20 20:20:37 crc kubenswrapper[4948]: I0120 20:20:37.866677 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kpr9" event={"ID":"d2afbffb-2711-4130-9949-9e1a30f3cb84","Type":"ContainerStarted","Data":"a16b73fde5789eda603f4231bc1733b42904490c612530f304062cf4294fba7d"} Jan 20 20:20:39 crc kubenswrapper[4948]: I0120 20:20:39.889592 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kpr9" event={"ID":"d2afbffb-2711-4130-9949-9e1a30f3cb84","Type":"ContainerStarted","Data":"b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6"} Jan 20 20:20:44 crc kubenswrapper[4948]: I0120 20:20:44.950852 4948 generic.go:334] "Generic (PLEG): container finished" podID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerID="b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6" exitCode=0 Jan 20 20:20:44 crc kubenswrapper[4948]: I0120 20:20:44.950924 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kpr9" event={"ID":"d2afbffb-2711-4130-9949-9e1a30f3cb84","Type":"ContainerDied","Data":"b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6"} Jan 20 20:20:45 crc kubenswrapper[4948]: I0120 20:20:45.290111 4948 scope.go:117] "RemoveContainer" containerID="8cc835529b854c5ab517f1ba92dede45b691a9de124e026a24407c65d2235fc2" Jan 20 20:20:45 crc kubenswrapper[4948]: I0120 20:20:45.961520 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kpr9" 
event={"ID":"d2afbffb-2711-4130-9949-9e1a30f3cb84","Type":"ContainerStarted","Data":"4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d"} Jan 20 20:20:45 crc kubenswrapper[4948]: I0120 20:20:45.986058 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6kpr9" podStartSLOduration=2.148576257 podStartE2EDuration="9.986033123s" podCreationTimestamp="2026-01-20 20:20:36 +0000 UTC" firstStartedPulling="2026-01-20 20:20:37.868528428 +0000 UTC m=+1865.819253397" lastFinishedPulling="2026-01-20 20:20:45.705985294 +0000 UTC m=+1873.656710263" observedRunningTime="2026-01-20 20:20:45.98003832 +0000 UTC m=+1873.930763289" watchObservedRunningTime="2026-01-20 20:20:45.986033123 +0000 UTC m=+1873.936758092" Jan 20 20:20:46 crc kubenswrapper[4948]: I0120 20:20:46.436120 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:46 crc kubenswrapper[4948]: I0120 20:20:46.436530 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:47 crc kubenswrapper[4948]: I0120 20:20:47.483339 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6kpr9" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="registry-server" probeResult="failure" output=< Jan 20 20:20:47 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 20:20:47 crc kubenswrapper[4948]: > Jan 20 20:20:56 crc kubenswrapper[4948]: I0120 20:20:56.485427 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:56 crc kubenswrapper[4948]: I0120 20:20:56.538849 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:56 crc kubenswrapper[4948]: I0120 20:20:56.729261 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6kpr9"] Jan 20 20:20:58 crc kubenswrapper[4948]: I0120 20:20:58.377735 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6kpr9" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="registry-server" containerID="cri-o://4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d" gracePeriod=2 Jan 20 20:20:58 crc kubenswrapper[4948]: I0120 20:20:58.882472 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.058377 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-utilities\") pod \"d2afbffb-2711-4130-9949-9e1a30f3cb84\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.059077 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-catalog-content\") pod \"d2afbffb-2711-4130-9949-9e1a30f3cb84\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.059159 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7g7r\" (UniqueName: \"kubernetes.io/projected/d2afbffb-2711-4130-9949-9e1a30f3cb84-kube-api-access-d7g7r\") pod \"d2afbffb-2711-4130-9949-9e1a30f3cb84\" (UID: \"d2afbffb-2711-4130-9949-9e1a30f3cb84\") " Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.059480 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-utilities" (OuterVolumeSpecName: "utilities") pod "d2afbffb-2711-4130-9949-9e1a30f3cb84" (UID: "d2afbffb-2711-4130-9949-9e1a30f3cb84"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.059876 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.066185 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2afbffb-2711-4130-9949-9e1a30f3cb84-kube-api-access-d7g7r" (OuterVolumeSpecName: "kube-api-access-d7g7r") pod "d2afbffb-2711-4130-9949-9e1a30f3cb84" (UID: "d2afbffb-2711-4130-9949-9e1a30f3cb84"). InnerVolumeSpecName "kube-api-access-d7g7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.161458 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7g7r\" (UniqueName: \"kubernetes.io/projected/d2afbffb-2711-4130-9949-9e1a30f3cb84-kube-api-access-d7g7r\") on node \"crc\" DevicePath \"\"" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.188498 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2afbffb-2711-4130-9949-9e1a30f3cb84" (UID: "d2afbffb-2711-4130-9949-9e1a30f3cb84"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.262997 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2afbffb-2711-4130-9949-9e1a30f3cb84-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.388640 4948 generic.go:334] "Generic (PLEG): container finished" podID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerID="4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d" exitCode=0 Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.388686 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kpr9" event={"ID":"d2afbffb-2711-4130-9949-9e1a30f3cb84","Type":"ContainerDied","Data":"4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d"} Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.388725 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6kpr9" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.388751 4948 scope.go:117] "RemoveContainer" containerID="4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.388726 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6kpr9" event={"ID":"d2afbffb-2711-4130-9949-9e1a30f3cb84","Type":"ContainerDied","Data":"a16b73fde5789eda603f4231bc1733b42904490c612530f304062cf4294fba7d"} Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.413301 4948 scope.go:117] "RemoveContainer" containerID="b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.436021 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6kpr9"] Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.448183 4948 scope.go:117] "RemoveContainer" containerID="e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.458480 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6kpr9"] Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.508211 4948 scope.go:117] "RemoveContainer" containerID="4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d" Jan 20 20:20:59 crc kubenswrapper[4948]: E0120 20:20:59.508691 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d\": container with ID starting with 4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d not found: ID does not exist" containerID="4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.508734 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d"} err="failed to get container status \"4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d\": rpc error: code = NotFound desc = could not find container \"4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d\": container with ID starting with 4689bec65ed41c92034ea9a21b618197b3e6f1569b1c43a75989fbf130604c0d not found: ID does not exist" Jan 20 20:20:59 crc 
kubenswrapper[4948]: I0120 20:20:59.508759 4948 scope.go:117] "RemoveContainer" containerID="b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6" Jan 20 20:20:59 crc kubenswrapper[4948]: E0120 20:20:59.509004 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6\": container with ID starting with b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6 not found: ID does not exist" containerID="b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.509026 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6"} err="failed to get container status \"b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6\": rpc error: code = NotFound desc = could not find container \"b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6\": container with ID starting with b57d12925d51e069d3d3231f0b15484e7f04d4f75c7608ec20a0c57b975cdcd6 not found: ID does not exist" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.509039 4948 scope.go:117] "RemoveContainer" containerID="e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf" Jan 20 20:20:59 crc kubenswrapper[4948]: E0120 20:20:59.509205 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf\": container with ID starting with e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf not found: ID does not exist" containerID="e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf" Jan 20 20:20:59 crc kubenswrapper[4948]: I0120 20:20:59.509224 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf"} err="failed to get container status \"e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf\": rpc error: code = NotFound desc = could not find container \"e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf\": container with ID starting with e71d90f56356bc968b1dbd110a46df0b6a93e50f42414af4e30a22f1f5b442bf not found: ID does not exist" Jan 20 20:21:00 crc kubenswrapper[4948]: I0120 20:21:00.580929 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" path="/var/lib/kubelet/pods/d2afbffb-2711-4130-9949-9e1a30f3cb84/volumes" Jan 20 20:21:18 crc kubenswrapper[4948]: I0120 20:21:18.563276 4948 generic.go:334] "Generic (PLEG): container finished" podID="c43c5ed8-ee74-481a-9b89-30845f8380b8" containerID="c31763a6fba6016aeaceafcc88449d55eb4e1fcb16a631104322129684eaac03" exitCode=0 Jan 20 20:21:18 crc kubenswrapper[4948]: I0120 20:21:18.563361 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" event={"ID":"c43c5ed8-ee74-481a-9b89-30845f8380b8","Type":"ContainerDied","Data":"c31763a6fba6016aeaceafcc88449d55eb4e1fcb16a631104322129684eaac03"} Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.174570 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.308527 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-inventory\") pod \"c43c5ed8-ee74-481a-9b89-30845f8380b8\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.308621 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcw2v\" (UniqueName: \"kubernetes.io/projected/c43c5ed8-ee74-481a-9b89-30845f8380b8-kube-api-access-bcw2v\") pod \"c43c5ed8-ee74-481a-9b89-30845f8380b8\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.308870 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-ssh-key-openstack-edpm-ipam\") pod \"c43c5ed8-ee74-481a-9b89-30845f8380b8\" (UID: \"c43c5ed8-ee74-481a-9b89-30845f8380b8\") " Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.353037 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c43c5ed8-ee74-481a-9b89-30845f8380b8-kube-api-access-bcw2v" (OuterVolumeSpecName: "kube-api-access-bcw2v") pod "c43c5ed8-ee74-481a-9b89-30845f8380b8" (UID: "c43c5ed8-ee74-481a-9b89-30845f8380b8"). InnerVolumeSpecName "kube-api-access-bcw2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.366527 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-inventory" (OuterVolumeSpecName: "inventory") pod "c43c5ed8-ee74-481a-9b89-30845f8380b8" (UID: "c43c5ed8-ee74-481a-9b89-30845f8380b8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.381312 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "c43c5ed8-ee74-481a-9b89-30845f8380b8" (UID: "c43c5ed8-ee74-481a-9b89-30845f8380b8"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.410956 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.410994 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcw2v\" (UniqueName: \"kubernetes.io/projected/c43c5ed8-ee74-481a-9b89-30845f8380b8-kube-api-access-bcw2v\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.411007 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c43c5ed8-ee74-481a-9b89-30845f8380b8-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.589535 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.641490 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-2446g" event={"ID":"c43c5ed8-ee74-481a-9b89-30845f8380b8","Type":"ContainerDied","Data":"e71d939b79b3c628a506645d8887f527a35783776fa0b7336129e2c1795988b4"} Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.641824 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e71d939b79b3c628a506645d8887f527a35783776fa0b7336129e2c1795988b4" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.735506 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-spfvx"] Jan 20 20:21:20 crc kubenswrapper[4948]: E0120 20:21:20.735969 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="registry-server" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.735986 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="registry-server" Jan 20 20:21:20 crc kubenswrapper[4948]: E0120 20:21:20.735998 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c43c5ed8-ee74-481a-9b89-30845f8380b8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.736006 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c43c5ed8-ee74-481a-9b89-30845f8380b8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:20 crc kubenswrapper[4948]: E0120 20:21:20.736024 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="extract-utilities" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.736030 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="extract-utilities" Jan 20 20:21:20 crc kubenswrapper[4948]: E0120 20:21:20.736061 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="extract-content" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.736067 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="extract-content" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.736502 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c43c5ed8-ee74-481a-9b89-30845f8380b8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.736531 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2afbffb-2711-4130-9949-9e1a30f3cb84" containerName="registry-server" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.738496 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.742536 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.742847 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.743086 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.747018 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.749142 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpzrs\" (UniqueName: \"kubernetes.io/projected/fc3ad5c4-f353-42b4-8266-6180aae6f48f-kube-api-access-dpzrs\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.749375 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.749440 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.757868 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-spfvx"] Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.851683 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.851776 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.852002 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpzrs\" (UniqueName: \"kubernetes.io/projected/fc3ad5c4-f353-42b4-8266-6180aae6f48f-kube-api-access-dpzrs\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc 
kubenswrapper[4948]: I0120 20:21:20.857463 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.858479 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:20 crc kubenswrapper[4948]: I0120 20:21:20.877541 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpzrs\" (UniqueName: \"kubernetes.io/projected/fc3ad5c4-f353-42b4-8266-6180aae6f48f-kube-api-access-dpzrs\") pod \"ssh-known-hosts-edpm-deployment-spfvx\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:21 crc kubenswrapper[4948]: I0120 20:21:21.067290 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:21 crc kubenswrapper[4948]: I0120 20:21:21.634254 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-spfvx"] Jan 20 20:21:22 crc kubenswrapper[4948]: I0120 20:21:22.613482 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" event={"ID":"fc3ad5c4-f353-42b4-8266-6180aae6f48f","Type":"ContainerStarted","Data":"0cd27112e3e1d8f666d68d3c9473c5713663d93288693f0de627c6dcab31231b"} Jan 20 20:21:22 crc kubenswrapper[4948]: I0120 20:21:22.613848 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" event={"ID":"fc3ad5c4-f353-42b4-8266-6180aae6f48f","Type":"ContainerStarted","Data":"0dc2af7a10a8f6e1436efe983442957efa9590d2d577ca316b56ef0e3f2884db"} Jan 20 20:21:22 crc kubenswrapper[4948]: I0120 20:21:22.644741 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" podStartSLOduration=2.155719072 podStartE2EDuration="2.644698113s" podCreationTimestamp="2026-01-20 20:21:20 +0000 UTC" firstStartedPulling="2026-01-20 20:21:21.644900843 +0000 UTC m=+1909.595625812" lastFinishedPulling="2026-01-20 20:21:22.133879874 +0000 UTC m=+1910.084604853" observedRunningTime="2026-01-20 20:21:22.639589766 +0000 UTC m=+1910.590314755" watchObservedRunningTime="2026-01-20 20:21:22.644698113 +0000 UTC m=+1910.595423082" Jan 20 20:21:29 crc kubenswrapper[4948]: I0120 20:21:29.670004 4948 generic.go:334] "Generic (PLEG): container finished" podID="fc3ad5c4-f353-42b4-8266-6180aae6f48f" containerID="0cd27112e3e1d8f666d68d3c9473c5713663d93288693f0de627c6dcab31231b" exitCode=0 Jan 20 20:21:29 crc kubenswrapper[4948]: I0120 20:21:29.670051 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" event={"ID":"fc3ad5c4-f353-42b4-8266-6180aae6f48f","Type":"ContainerDied","Data":"0cd27112e3e1d8f666d68d3c9473c5713663d93288693f0de627c6dcab31231b"} Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.136853 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.167404 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-inventory-0\") pod \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.167589 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-ssh-key-openstack-edpm-ipam\") pod \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.167843 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpzrs\" (UniqueName: \"kubernetes.io/projected/fc3ad5c4-f353-42b4-8266-6180aae6f48f-kube-api-access-dpzrs\") pod \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\" (UID: \"fc3ad5c4-f353-42b4-8266-6180aae6f48f\") " Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.174129 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc3ad5c4-f353-42b4-8266-6180aae6f48f-kube-api-access-dpzrs" (OuterVolumeSpecName: "kube-api-access-dpzrs") pod "fc3ad5c4-f353-42b4-8266-6180aae6f48f" (UID: "fc3ad5c4-f353-42b4-8266-6180aae6f48f"). InnerVolumeSpecName "kube-api-access-dpzrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.202022 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "fc3ad5c4-f353-42b4-8266-6180aae6f48f" (UID: "fc3ad5c4-f353-42b4-8266-6180aae6f48f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.205730 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "fc3ad5c4-f353-42b4-8266-6180aae6f48f" (UID: "fc3ad5c4-f353-42b4-8266-6180aae6f48f"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.270497 4948 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.270531 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fc3ad5c4-f353-42b4-8266-6180aae6f48f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.270544 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpzrs\" (UniqueName: \"kubernetes.io/projected/fc3ad5c4-f353-42b4-8266-6180aae6f48f-kube-api-access-dpzrs\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.698329 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" event={"ID":"fc3ad5c4-f353-42b4-8266-6180aae6f48f","Type":"ContainerDied","Data":"0dc2af7a10a8f6e1436efe983442957efa9590d2d577ca316b56ef0e3f2884db"} Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.698377 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-spfvx" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.698375 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0dc2af7a10a8f6e1436efe983442957efa9590d2d577ca316b56ef0e3f2884db" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.772106 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms"] Jan 20 20:21:31 crc kubenswrapper[4948]: E0120 20:21:31.772611 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc3ad5c4-f353-42b4-8266-6180aae6f48f" containerName="ssh-known-hosts-edpm-deployment" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.772633 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc3ad5c4-f353-42b4-8266-6180aae6f48f" containerName="ssh-known-hosts-edpm-deployment" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.773109 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc3ad5c4-f353-42b4-8266-6180aae6f48f" containerName="ssh-known-hosts-edpm-deployment" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.773944 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.776412 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.776549 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.776815 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.778044 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.791234 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms"] Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.879768 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.879849 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwj8p\" (UniqueName: \"kubernetes.io/projected/1a69232e-a7d3-43f7-a730-b21ffbf62e38-kube-api-access-jwj8p\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.879894 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.981746 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.993497 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.993605 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwj8p\" (UniqueName: \"kubernetes.io/projected/1a69232e-a7d3-43f7-a730-b21ffbf62e38-kube-api-access-jwj8p\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.994421 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:31 crc kubenswrapper[4948]: I0120 20:21:31.996427 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:32 crc kubenswrapper[4948]: I0120 20:21:32.057552 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwj8p\" (UniqueName: \"kubernetes.io/projected/1a69232e-a7d3-43f7-a730-b21ffbf62e38-kube-api-access-jwj8p\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-kgkms\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:32 crc kubenswrapper[4948]: I0120 20:21:32.096594 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:32 crc kubenswrapper[4948]: I0120 20:21:32.640069 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms"] Jan 20 20:21:32 crc kubenswrapper[4948]: I0120 20:21:32.707342 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" event={"ID":"1a69232e-a7d3-43f7-a730-b21ffbf62e38","Type":"ContainerStarted","Data":"200b0b0bdd7148bf1c2fb402c6c372bdf9f52da248a1c2b0be40a648459e538b"} Jan 20 20:21:33 crc kubenswrapper[4948]: I0120 20:21:33.079771 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:21:33 crc kubenswrapper[4948]: I0120 20:21:33.720060 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" event={"ID":"1a69232e-a7d3-43f7-a730-b21ffbf62e38","Type":"ContainerStarted","Data":"cec24a2b300857c2827715deff0d172cc8860c29ea3f130560b6c8378fa48144"} Jan 20 20:21:33 crc kubenswrapper[4948]: I0120 20:21:33.753012 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" podStartSLOduration=2.324909748 podStartE2EDuration="2.752987497s" podCreationTimestamp="2026-01-20 20:21:31 +0000 UTC" firstStartedPulling="2026-01-20 20:21:32.649425502 +0000 UTC m=+1920.600150471" lastFinishedPulling="2026-01-20 20:21:33.077503251 +0000 UTC m=+1921.028228220" observedRunningTime="2026-01-20 20:21:33.740348954 +0000 UTC m=+1921.691073943" watchObservedRunningTime="2026-01-20 20:21:33.752987497 +0000 UTC m=+1921.703712466" Jan 20 20:21:41 crc kubenswrapper[4948]: I0120 20:21:41.802549 4948 generic.go:334] "Generic (PLEG): container finished" podID="1a69232e-a7d3-43f7-a730-b21ffbf62e38" 
containerID="cec24a2b300857c2827715deff0d172cc8860c29ea3f130560b6c8378fa48144" exitCode=0 Jan 20 20:21:41 crc kubenswrapper[4948]: I0120 20:21:41.802676 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" event={"ID":"1a69232e-a7d3-43f7-a730-b21ffbf62e38","Type":"ContainerDied","Data":"cec24a2b300857c2827715deff0d172cc8860c29ea3f130560b6c8378fa48144"} Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.235403 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.327052 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-ssh-key-openstack-edpm-ipam\") pod \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.327250 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwj8p\" (UniqueName: \"kubernetes.io/projected/1a69232e-a7d3-43f7-a730-b21ffbf62e38-kube-api-access-jwj8p\") pod \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.327276 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-inventory\") pod \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\" (UID: \"1a69232e-a7d3-43f7-a730-b21ffbf62e38\") " Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.340602 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a69232e-a7d3-43f7-a730-b21ffbf62e38-kube-api-access-jwj8p" (OuterVolumeSpecName: "kube-api-access-jwj8p") pod "1a69232e-a7d3-43f7-a730-b21ffbf62e38" (UID: "1a69232e-a7d3-43f7-a730-b21ffbf62e38"). InnerVolumeSpecName "kube-api-access-jwj8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.358762 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1a69232e-a7d3-43f7-a730-b21ffbf62e38" (UID: "1a69232e-a7d3-43f7-a730-b21ffbf62e38"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.359822 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-inventory" (OuterVolumeSpecName: "inventory") pod "1a69232e-a7d3-43f7-a730-b21ffbf62e38" (UID: "1a69232e-a7d3-43f7-a730-b21ffbf62e38"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.429468 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwj8p\" (UniqueName: \"kubernetes.io/projected/1a69232e-a7d3-43f7-a730-b21ffbf62e38-kube-api-access-jwj8p\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.429509 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.429523 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a69232e-a7d3-43f7-a730-b21ffbf62e38-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.824087 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" event={"ID":"1a69232e-a7d3-43f7-a730-b21ffbf62e38","Type":"ContainerDied","Data":"200b0b0bdd7148bf1c2fb402c6c372bdf9f52da248a1c2b0be40a648459e538b"} Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.824622 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="200b0b0bdd7148bf1c2fb402c6c372bdf9f52da248a1c2b0be40a648459e538b" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.824165 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-kgkms" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.935549 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p"] Jan 20 20:21:43 crc kubenswrapper[4948]: E0120 20:21:43.936444 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a69232e-a7d3-43f7-a730-b21ffbf62e38" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.936465 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a69232e-a7d3-43f7-a730-b21ffbf62e38" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.936694 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a69232e-a7d3-43f7-a730-b21ffbf62e38" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.937679 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.940320 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.941910 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.942070 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.944926 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:21:43 crc kubenswrapper[4948]: I0120 20:21:43.955223 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p"] Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.043436 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ztmc\" (UniqueName: \"kubernetes.io/projected/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-kube-api-access-9ztmc\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.043513 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.043645 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.157139 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.157294 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ztmc\" (UniqueName: \"kubernetes.io/projected/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-kube-api-access-9ztmc\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.157333 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-ssh-key-openstack-edpm-ipam\") pod 
\"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.166061 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.167178 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.189460 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ztmc\" (UniqueName: \"kubernetes.io/projected/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-kube-api-access-9ztmc\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.440729 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:44 crc kubenswrapper[4948]: I0120 20:21:44.950588 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p"] Jan 20 20:21:45 crc kubenswrapper[4948]: I0120 20:21:45.842157 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" event={"ID":"c2713e4e-89b8-4d59-9a34-947cd7af2e0e","Type":"ContainerStarted","Data":"acb104e115b78bb9bf51123976fc6ef116a481f50c48316752bc82949b734af2"} Jan 20 20:21:45 crc kubenswrapper[4948]: I0120 20:21:45.842690 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" event={"ID":"c2713e4e-89b8-4d59-9a34-947cd7af2e0e","Type":"ContainerStarted","Data":"b2585f7ffbf930cf3d61885592f35f418b64162df95db4316ea73bd6f8cbbe7c"} Jan 20 20:21:45 crc kubenswrapper[4948]: I0120 20:21:45.870656 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" podStartSLOduration=2.409162789 podStartE2EDuration="2.870637008s" podCreationTimestamp="2026-01-20 20:21:43 +0000 UTC" firstStartedPulling="2026-01-20 20:21:44.95882623 +0000 UTC m=+1932.909551199" lastFinishedPulling="2026-01-20 20:21:45.420300449 +0000 UTC m=+1933.371025418" observedRunningTime="2026-01-20 20:21:45.860180487 +0000 UTC m=+1933.810905466" watchObservedRunningTime="2026-01-20 20:21:45.870637008 +0000 UTC m=+1933.821361977" Jan 20 20:21:56 crc kubenswrapper[4948]: I0120 20:21:56.231934 4948 generic.go:334] "Generic (PLEG): container finished" podID="c2713e4e-89b8-4d59-9a34-947cd7af2e0e" containerID="acb104e115b78bb9bf51123976fc6ef116a481f50c48316752bc82949b734af2" exitCode=0 Jan 20 20:21:56 crc kubenswrapper[4948]: I0120 20:21:56.232022 4948 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" event={"ID":"c2713e4e-89b8-4d59-9a34-947cd7af2e0e","Type":"ContainerDied","Data":"acb104e115b78bb9bf51123976fc6ef116a481f50c48316752bc82949b734af2"} Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.617006 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.651861 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-ssh-key-openstack-edpm-ipam\") pod \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.651910 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ztmc\" (UniqueName: \"kubernetes.io/projected/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-kube-api-access-9ztmc\") pod \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.652027 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-inventory\") pod \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\" (UID: \"c2713e4e-89b8-4d59-9a34-947cd7af2e0e\") " Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.658771 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-kube-api-access-9ztmc" (OuterVolumeSpecName: "kube-api-access-9ztmc") pod "c2713e4e-89b8-4d59-9a34-947cd7af2e0e" (UID: "c2713e4e-89b8-4d59-9a34-947cd7af2e0e"). InnerVolumeSpecName "kube-api-access-9ztmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.680852 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-inventory" (OuterVolumeSpecName: "inventory") pod "c2713e4e-89b8-4d59-9a34-947cd7af2e0e" (UID: "c2713e4e-89b8-4d59-9a34-947cd7af2e0e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.688266 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "c2713e4e-89b8-4d59-9a34-947cd7af2e0e" (UID: "c2713e4e-89b8-4d59-9a34-947cd7af2e0e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.753221 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.753505 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:57 crc kubenswrapper[4948]: I0120 20:21:57.753516 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ztmc\" (UniqueName: \"kubernetes.io/projected/c2713e4e-89b8-4d59-9a34-947cd7af2e0e-kube-api-access-9ztmc\") on node \"crc\" DevicePath \"\"" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.253642 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" event={"ID":"c2713e4e-89b8-4d59-9a34-947cd7af2e0e","Type":"ContainerDied","Data":"b2585f7ffbf930cf3d61885592f35f418b64162df95db4316ea73bd6f8cbbe7c"} Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.253744 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2585f7ffbf930cf3d61885592f35f418b64162df95db4316ea73bd6f8cbbe7c" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.253823 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.346689 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq"] Jan 20 20:21:58 crc kubenswrapper[4948]: E0120 20:21:58.347242 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2713e4e-89b8-4d59-9a34-947cd7af2e0e" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.347270 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2713e4e-89b8-4d59-9a34-947cd7af2e0e" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.347555 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2713e4e-89b8-4d59-9a34-947cd7af2e0e" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.348383 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.353984 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.354785 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.355479 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.355655 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.355812 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.355970 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.356115 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.356976 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.359164 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq"] Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.363926 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.363995 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364040 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364064 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-inventory\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364117 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364139 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8xzc\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-kube-api-access-v8xzc\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364185 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364207 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364227 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364253 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364296 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") 
" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364334 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364383 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.364409 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.466610 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.466672 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.466883 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467014 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467054 4948 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-v8xzc\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-kube-api-access-v8xzc\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467162 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467201 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467225 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467291 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467402 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467480 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467571 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-libvirt-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467642 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.467745 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.472523 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.472998 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.473264 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.473980 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.474992 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.475420 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.477023 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.477148 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.477800 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.478056 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.478791 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.479019 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.479567 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-neutron-metadata-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.484401 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8xzc\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-kube-api-access-v8xzc\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:58 crc kubenswrapper[4948]: I0120 20:21:58.665984 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" Jan 20 20:21:59 crc kubenswrapper[4948]: I0120 20:21:59.222834 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq"] Jan 20 20:21:59 crc kubenswrapper[4948]: I0120 20:21:59.265066 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" event={"ID":"cf7abc7a-4446-4807-af6e-96711d710f9e","Type":"ContainerStarted","Data":"d982d9cc3a15918778940c368c3039c5f365c46cc33c0ffac016c183227ce088"} Jan 20 20:22:00 crc kubenswrapper[4948]: I0120 20:22:00.275289 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" event={"ID":"cf7abc7a-4446-4807-af6e-96711d710f9e","Type":"ContainerStarted","Data":"41a82de52e035ed79f3ea8ff51b75deb05d409838d0aaef6e075dcf49803c66c"} Jan 20 20:22:37 crc kubenswrapper[4948]: I0120 20:22:37.653885 4948 generic.go:334] "Generic (PLEG): container finished" podID="cf7abc7a-4446-4807-af6e-96711d710f9e" containerID="41a82de52e035ed79f3ea8ff51b75deb05d409838d0aaef6e075dcf49803c66c" exitCode=0 Jan 20 20:22:37 crc kubenswrapper[4948]: I0120 20:22:37.653967 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" event={"ID":"cf7abc7a-4446-4807-af6e-96711d710f9e","Type":"ContainerDied","Data":"41a82de52e035ed79f3ea8ff51b75deb05d409838d0aaef6e075dcf49803c66c"} Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.122349 4948 util.go:48] "No ready sandbox for pod can be found. 
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.122349 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272059 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-ovn-default-certs-0\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272112 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-neutron-metadata-combined-ca-bundle\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272221 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8xzc\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-kube-api-access-v8xzc\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272249 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-bootstrap-combined-ca-bundle\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272290 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ssh-key-openstack-edpm-ipam\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272327 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-telemetry-combined-ca-bundle\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272388 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272409 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-repo-setup-combined-ca-bundle\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272445 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ovn-combined-ca-bundle\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272463 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272479 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272512 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-inventory\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272531 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-nova-combined-ca-bundle\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.272568 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-libvirt-combined-ca-bundle\") pod \"cf7abc7a-4446-4807-af6e-96711d710f9e\" (UID: \"cf7abc7a-4446-4807-af6e-96711d710f9e\") "
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.278628 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.278948 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.279452 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.279751 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-kube-api-access-v8xzc" (OuterVolumeSpecName: "kube-api-access-v8xzc") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "kube-api-access-v8xzc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.279783 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.280314 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.281190 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.281540 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.282785 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.283217 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.284500 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.291988 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.305248 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.329323 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-inventory" (OuterVolumeSpecName: "inventory") pod "cf7abc7a-4446-4807-af6e-96711d710f9e" (UID: "cf7abc7a-4446-4807-af6e-96711d710f9e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375270 4948 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375311 4948 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375326 4948 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375340 4948 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375353 4948 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375365 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-inventory\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375376 4948 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375423 4948 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375434 4948 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375445 4948 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375456 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8xzc\" (UniqueName: \"kubernetes.io/projected/cf7abc7a-4446-4807-af6e-96711d710f9e-kube-api-access-v8xzc\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375469 4948 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375479 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.375491 4948 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf7abc7a-4446-4807-af6e-96711d710f9e-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.670798 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq" event={"ID":"cf7abc7a-4446-4807-af6e-96711d710f9e","Type":"ContainerDied","Data":"d982d9cc3a15918778940c368c3039c5f365c46cc33c0ffac016c183227ce088"}
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.670999 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d982d9cc3a15918778940c368c3039c5f365c46cc33c0ffac016c183227ce088"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.671068 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.783799 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"]
Jan 20 20:22:39 crc kubenswrapper[4948]: E0120 20:22:39.784430 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf7abc7a-4446-4807-af6e-96711d710f9e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.784453 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf7abc7a-4446-4807-af6e-96711d710f9e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.784701 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf7abc7a-4446-4807-af6e-96711d710f9e" containerName="install-certs-edpm-deployment-openstack-edpm-ipam"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.785353 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.790937 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.790950 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.791032 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.798303 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.802320 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"]
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.802332 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.885489 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.885589 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.885634 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ee6e6079-b341-4648-b640-da45d2f27ed5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.885668 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqpc6\" (UniqueName: \"kubernetes.io/projected/ee6e6079-b341-4648-b640-da45d2f27ed5-kube-api-access-tqpc6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.885730 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.987727 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.988124 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.988234 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ee6e6079-b341-4648-b640-da45d2f27ed5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.988322 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqpc6\" (UniqueName: \"kubernetes.io/projected/ee6e6079-b341-4648-b640-da45d2f27ed5-kube-api-access-tqpc6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.988413 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.989380 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ee6e6079-b341-4648-b640-da45d2f27ed5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.991959 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
Jan 20 20:22:39 crc kubenswrapper[4948]: I0120 20:22:39.993971 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"
\"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" Jan 20 20:22:40 crc kubenswrapper[4948]: I0120 20:22:40.006640 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqpc6\" (UniqueName: \"kubernetes.io/projected/ee6e6079-b341-4648-b640-da45d2f27ed5-kube-api-access-tqpc6\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-7tm27\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" Jan 20 20:22:40 crc kubenswrapper[4948]: I0120 20:22:40.105006 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" Jan 20 20:22:40 crc kubenswrapper[4948]: I0120 20:22:40.632029 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27"] Jan 20 20:22:40 crc kubenswrapper[4948]: I0120 20:22:40.680511 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" event={"ID":"ee6e6079-b341-4648-b640-da45d2f27ed5","Type":"ContainerStarted","Data":"951275e256854e03cfa114408b9bbd88bd9a1f3ae98ffce2fbcc61a104e93bb1"} Jan 20 20:22:41 crc kubenswrapper[4948]: I0120 20:22:41.698586 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" event={"ID":"ee6e6079-b341-4648-b640-da45d2f27ed5","Type":"ContainerStarted","Data":"72caeaaafca8f53abd984b929f692303cab4ef12b101b8f49577ed8979c07355"} Jan 20 20:22:41 crc kubenswrapper[4948]: I0120 20:22:41.731818 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" podStartSLOduration=2.332731756 podStartE2EDuration="2.731781198s" podCreationTimestamp="2026-01-20 20:22:39 +0000 UTC" firstStartedPulling="2026-01-20 20:22:40.62983532 +0000 UTC m=+1988.580560289" lastFinishedPulling="2026-01-20 20:22:41.028884762 +0000 UTC m=+1988.979609731" observedRunningTime="2026-01-20 20:22:41.723278563 +0000 UTC m=+1989.674003522" watchObservedRunningTime="2026-01-20 20:22:41.731781198 +0000 UTC m=+1989.682506167" Jan 20 20:22:50 crc kubenswrapper[4948]: I0120 20:22:50.249874 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:22:50 crc kubenswrapper[4948]: I0120 20:22:50.250506 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:23:20 crc kubenswrapper[4948]: I0120 20:23:20.249855 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:23:20 crc kubenswrapper[4948]: I0120 20:23:20.250433 4948 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:23:50 crc kubenswrapper[4948]: I0120 20:23:50.249582 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:23:50 crc kubenswrapper[4948]: I0120 20:23:50.250205 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:23:50 crc kubenswrapper[4948]: I0120 20:23:50.250262 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:23:50 crc kubenswrapper[4948]: I0120 20:23:50.251128 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5cbb7c8430f6645757313c4d6b374566eb7331d9daa136806f9655de7ed9b678"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:23:50 crc kubenswrapper[4948]: I0120 20:23:50.251207 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://5cbb7c8430f6645757313c4d6b374566eb7331d9daa136806f9655de7ed9b678" gracePeriod=600 Jan 20 20:23:50 crc kubenswrapper[4948]: I0120 20:23:50.397073 4948 generic.go:334] "Generic (PLEG): container finished" podID="ee6e6079-b341-4648-b640-da45d2f27ed5" containerID="72caeaaafca8f53abd984b929f692303cab4ef12b101b8f49577ed8979c07355" exitCode=0 Jan 20 20:23:50 crc kubenswrapper[4948]: I0120 20:23:50.397128 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" event={"ID":"ee6e6079-b341-4648-b640-da45d2f27ed5","Type":"ContainerDied","Data":"72caeaaafca8f53abd984b929f692303cab4ef12b101b8f49577ed8979c07355"} Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.407439 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="5cbb7c8430f6645757313c4d6b374566eb7331d9daa136806f9655de7ed9b678" exitCode=0 Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.407503 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"5cbb7c8430f6645757313c4d6b374566eb7331d9daa136806f9655de7ed9b678"} Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.408113 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" 
event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75"} Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.408147 4948 scope.go:117] "RemoveContainer" containerID="a868d8f253696625e813551173ce1c0e2d3b78fdf6bc9c374843b6ff46e1611f" Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.872116 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.918578 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ee6e6079-b341-4648-b640-da45d2f27ed5-ovncontroller-config-0\") pod \"ee6e6079-b341-4648-b640-da45d2f27ed5\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.918833 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqpc6\" (UniqueName: \"kubernetes.io/projected/ee6e6079-b341-4648-b640-da45d2f27ed5-kube-api-access-tqpc6\") pod \"ee6e6079-b341-4648-b640-da45d2f27ed5\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.926047 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee6e6079-b341-4648-b640-da45d2f27ed5-kube-api-access-tqpc6" (OuterVolumeSpecName: "kube-api-access-tqpc6") pod "ee6e6079-b341-4648-b640-da45d2f27ed5" (UID: "ee6e6079-b341-4648-b640-da45d2f27ed5"). InnerVolumeSpecName "kube-api-access-tqpc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:23:51 crc kubenswrapper[4948]: I0120 20:23:51.950136 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee6e6079-b341-4648-b640-da45d2f27ed5-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "ee6e6079-b341-4648-b640-da45d2f27ed5" (UID: "ee6e6079-b341-4648-b640-da45d2f27ed5"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.020535 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-inventory\") pod \"ee6e6079-b341-4648-b640-da45d2f27ed5\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.020777 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ssh-key-openstack-edpm-ipam\") pod \"ee6e6079-b341-4648-b640-da45d2f27ed5\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.021287 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ovn-combined-ca-bundle\") pod \"ee6e6079-b341-4648-b640-da45d2f27ed5\" (UID: \"ee6e6079-b341-4648-b640-da45d2f27ed5\") " Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.023576 4948 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/ee6e6079-b341-4648-b640-da45d2f27ed5-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.023698 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqpc6\" (UniqueName: \"kubernetes.io/projected/ee6e6079-b341-4648-b640-da45d2f27ed5-kube-api-access-tqpc6\") on node \"crc\" DevicePath \"\"" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.026878 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "ee6e6079-b341-4648-b640-da45d2f27ed5" (UID: "ee6e6079-b341-4648-b640-da45d2f27ed5"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.045681 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ee6e6079-b341-4648-b640-da45d2f27ed5" (UID: "ee6e6079-b341-4648-b640-da45d2f27ed5"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.049687 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-inventory" (OuterVolumeSpecName: "inventory") pod "ee6e6079-b341-4648-b640-da45d2f27ed5" (UID: "ee6e6079-b341-4648-b640-da45d2f27ed5"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.125717 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.125756 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.125766 4948 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e6079-b341-4648-b640-da45d2f27ed5-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.419584 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" event={"ID":"ee6e6079-b341-4648-b640-da45d2f27ed5","Type":"ContainerDied","Data":"951275e256854e03cfa114408b9bbd88bd9a1f3ae98ffce2fbcc61a104e93bb1"} Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.419614 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-7tm27" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.419630 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="951275e256854e03cfa114408b9bbd88bd9a1f3ae98ffce2fbcc61a104e93bb1" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.581855 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"] Jan 20 20:23:52 crc kubenswrapper[4948]: E0120 20:23:52.582274 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee6e6079-b341-4648-b640-da45d2f27ed5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.582307 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee6e6079-b341-4648-b640-da45d2f27ed5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.582596 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee6e6079-b341-4648-b640-da45d2f27ed5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.583436 4948 util.go:30] "No sandbox for pod can be found. 
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.583436 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.586104 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.586628 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.586765 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.587048 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.587459 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.595459 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.634243 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.634296 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.634376 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.634400 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.634418 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc597\" (UniqueName: \"kubernetes.io/projected/a14c4acd-7573-4e72-9ab4-c1263844f59e-kube-api-access-pc597\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.634466 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.637864 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"]
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.736094 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.736204 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.736258 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.736375 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.736408 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.736438 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc597\" (UniqueName: \"kubernetes.io/projected/a14c4acd-7573-4e72-9ab4-c1263844f59e-kube-api-access-pc597\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.740311 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.740902 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.741185 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.746544 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.746558 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.756104 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc597\" (UniqueName: \"kubernetes.io/projected/a14c4acd-7573-4e72-9ab4-c1263844f59e-kube-api-access-pc597\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:52 crc kubenswrapper[4948]: I0120 20:23:52.908694 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:23:53 crc kubenswrapper[4948]: I0120 20:23:53.572455 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"]
Jan 20 20:23:54 crc kubenswrapper[4948]: I0120 20:23:54.468902 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2" event={"ID":"a14c4acd-7573-4e72-9ab4-c1263844f59e","Type":"ContainerStarted","Data":"c56152ba171d931d0ea19294694360b0a497995b0868149ab6765d424bc6787e"}
Jan 20 20:23:54 crc kubenswrapper[4948]: I0120 20:23:54.469173 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2" event={"ID":"a14c4acd-7573-4e72-9ab4-c1263844f59e","Type":"ContainerStarted","Data":"762b0a28dc03e8ae2e9e95125719c277928726735221db01b1164cb13db35f28"}
Jan 20 20:23:54 crc kubenswrapper[4948]: I0120 20:23:54.505563 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2" podStartSLOduration=2.057650578 podStartE2EDuration="2.505324843s" podCreationTimestamp="2026-01-20 20:23:52 +0000 UTC" firstStartedPulling="2026-01-20 20:23:53.590921188 +0000 UTC m=+2061.541646157" lastFinishedPulling="2026-01-20 20:23:54.038595453 +0000 UTC m=+2061.989320422" observedRunningTime="2026-01-20 20:23:54.488001848 +0000 UTC m=+2062.438726817" watchObservedRunningTime="2026-01-20 20:23:54.505324843 +0000 UTC m=+2062.456049812"
Jan 20 20:24:47 crc kubenswrapper[4948]: I0120 20:24:47.986467 4948 generic.go:334] "Generic (PLEG): container finished" podID="a14c4acd-7573-4e72-9ab4-c1263844f59e" containerID="c56152ba171d931d0ea19294694360b0a497995b0868149ab6765d424bc6787e" exitCode=0
Jan 20 20:24:47 crc kubenswrapper[4948]: I0120 20:24:47.986644 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2" event={"ID":"a14c4acd-7573-4e72-9ab4-c1263844f59e","Type":"ContainerDied","Data":"c56152ba171d931d0ea19294694360b0a497995b0868149ab6765d424bc6787e"}
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.010517 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2" event={"ID":"a14c4acd-7573-4e72-9ab4-c1263844f59e","Type":"ContainerDied","Data":"762b0a28dc03e8ae2e9e95125719c277928726735221db01b1164cb13db35f28"}
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.010927 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="762b0a28dc03e8ae2e9e95125719c277928726735221db01b1164cb13db35f28"
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.034167 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.181694 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-nova-metadata-neutron-config-0\") pod \"a14c4acd-7573-4e72-9ab4-c1263844f59e\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") "
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.181775 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-ssh-key-openstack-edpm-ipam\") pod \"a14c4acd-7573-4e72-9ab4-c1263844f59e\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") "
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.181993 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pc597\" (UniqueName: \"kubernetes.io/projected/a14c4acd-7573-4e72-9ab4-c1263844f59e-kube-api-access-pc597\") pod \"a14c4acd-7573-4e72-9ab4-c1263844f59e\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") "
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.182038 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-ovn-metadata-agent-neutron-config-0\") pod \"a14c4acd-7573-4e72-9ab4-c1263844f59e\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") "
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.182089 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-metadata-combined-ca-bundle\") pod \"a14c4acd-7573-4e72-9ab4-c1263844f59e\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") "
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.182127 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-inventory\") pod \"a14c4acd-7573-4e72-9ab4-c1263844f59e\" (UID: \"a14c4acd-7573-4e72-9ab4-c1263844f59e\") "
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.188071 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a14c4acd-7573-4e72-9ab4-c1263844f59e-kube-api-access-pc597" (OuterVolumeSpecName: "kube-api-access-pc597") pod "a14c4acd-7573-4e72-9ab4-c1263844f59e" (UID: "a14c4acd-7573-4e72-9ab4-c1263844f59e"). InnerVolumeSpecName "kube-api-access-pc597". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.188104 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "a14c4acd-7573-4e72-9ab4-c1263844f59e" (UID: "a14c4acd-7573-4e72-9ab4-c1263844f59e"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.209453 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "a14c4acd-7573-4e72-9ab4-c1263844f59e" (UID: "a14c4acd-7573-4e72-9ab4-c1263844f59e"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.210833 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-inventory" (OuterVolumeSpecName: "inventory") pod "a14c4acd-7573-4e72-9ab4-c1263844f59e" (UID: "a14c4acd-7573-4e72-9ab4-c1263844f59e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.220625 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a14c4acd-7573-4e72-9ab4-c1263844f59e" (UID: "a14c4acd-7573-4e72-9ab4-c1263844f59e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.225341 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "a14c4acd-7573-4e72-9ab4-c1263844f59e" (UID: "a14c4acd-7573-4e72-9ab4-c1263844f59e"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.284009 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pc597\" (UniqueName: \"kubernetes.io/projected/a14c4acd-7573-4e72-9ab4-c1263844f59e-kube-api-access-pc597\") on node \"crc\" DevicePath \"\""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.284055 4948 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.284075 4948 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.284089 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-inventory\") on node \"crc\" DevicePath \"\""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.284104 4948 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\""
Jan 20 20:24:50 crc kubenswrapper[4948]: I0120 20:24:50.284116 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a14c4acd-7573-4e72-9ab4-c1263844f59e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.019202 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2"
Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.149186 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2"]
Jan 20 20:24:51 crc kubenswrapper[4948]: E0120 20:24:51.149736 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a14c4acd-7573-4e72-9ab4-c1263844f59e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.149759 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a14c4acd-7573-4e72-9ab4-c1263844f59e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.150011 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="a14c4acd-7573-4e72-9ab4-c1263844f59e" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.153338 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.153605 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.153856 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.154347 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.154841 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.169592 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2"] Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.202445 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.202863 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.203009 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.203128 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwtwh\" (UniqueName: \"kubernetes.io/projected/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-kube-api-access-qwtwh\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.203307 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.304601 4948 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.304648 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.304673 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwtwh\" (UniqueName: \"kubernetes.io/projected/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-kube-api-access-qwtwh\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.304729 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.304828 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.310789 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.310907 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.312761 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.318493 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.330990 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwtwh\" (UniqueName: \"kubernetes.io/projected/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-kube-api-access-qwtwh\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:51 crc kubenswrapper[4948]: I0120 20:24:51.469669 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:24:52 crc kubenswrapper[4948]: I0120 20:24:52.094899 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2"] Jan 20 20:24:53 crc kubenswrapper[4948]: I0120 20:24:53.061346 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" event={"ID":"c6149a97-b5c3-4ec7-8b50-fc3a77843b48","Type":"ContainerStarted","Data":"2355f5c7b2ba86d20a78f4dcfed8c3a07f7766f7d5dafae020290001f2135a08"} Jan 20 20:24:53 crc kubenswrapper[4948]: I0120 20:24:53.061686 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" event={"ID":"c6149a97-b5c3-4ec7-8b50-fc3a77843b48","Type":"ContainerStarted","Data":"63906aa9cd03f24a299ed396d6d71eab039a5cb2e8752fb5d5c8d70fd3c08e05"} Jan 20 20:24:53 crc kubenswrapper[4948]: I0120 20:24:53.085927 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" podStartSLOduration=1.519371537 podStartE2EDuration="2.085903405s" podCreationTimestamp="2026-01-20 20:24:51 +0000 UTC" firstStartedPulling="2026-01-20 20:24:52.097513809 +0000 UTC m=+2120.048238778" lastFinishedPulling="2026-01-20 20:24:52.664045677 +0000 UTC m=+2120.614770646" observedRunningTime="2026-01-20 20:24:53.077400163 +0000 UTC m=+2121.028125152" watchObservedRunningTime="2026-01-20 20:24:53.085903405 +0000 UTC m=+2121.036628374" Jan 20 20:25:50 crc kubenswrapper[4948]: I0120 20:25:50.249524 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:25:50 crc kubenswrapper[4948]: I0120 20:25:50.250105 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:26:11 crc kubenswrapper[4948]: I0120 20:26:11.954563 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xmwcp"] Jan 20 20:26:11 crc kubenswrapper[4948]: I0120 20:26:11.959287 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:11 crc kubenswrapper[4948]: I0120 20:26:11.980499 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmwcp"] Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.065457 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-utilities\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.065601 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-catalog-content\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.065645 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwlld\" (UniqueName: \"kubernetes.io/projected/44b9d838-b920-4772-9e4f-c67a43af054e-kube-api-access-cwlld\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.168127 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-utilities\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.168236 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-catalog-content\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.168270 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwlld\" (UniqueName: \"kubernetes.io/projected/44b9d838-b920-4772-9e4f-c67a43af054e-kube-api-access-cwlld\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.169162 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-utilities\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.169177 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-catalog-content\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.187793 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-cwlld\" (UniqueName: \"kubernetes.io/projected/44b9d838-b920-4772-9e4f-c67a43af054e-kube-api-access-cwlld\") pod \"redhat-marketplace-xmwcp\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.299951 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:12 crc kubenswrapper[4948]: I0120 20:26:12.845165 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmwcp"] Jan 20 20:26:13 crc kubenswrapper[4948]: I0120 20:26:13.158689 4948 generic.go:334] "Generic (PLEG): container finished" podID="44b9d838-b920-4772-9e4f-c67a43af054e" containerID="cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2" exitCode=0 Jan 20 20:26:13 crc kubenswrapper[4948]: I0120 20:26:13.158762 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmwcp" event={"ID":"44b9d838-b920-4772-9e4f-c67a43af054e","Type":"ContainerDied","Data":"cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2"} Jan 20 20:26:13 crc kubenswrapper[4948]: I0120 20:26:13.158809 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmwcp" event={"ID":"44b9d838-b920-4772-9e4f-c67a43af054e","Type":"ContainerStarted","Data":"49972524009eae5218bb18708b4972ad2aa084b2eb669e8a561b9c7cbd6a6964"} Jan 20 20:26:13 crc kubenswrapper[4948]: I0120 20:26:13.161053 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:26:14 crc kubenswrapper[4948]: I0120 20:26:14.168034 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmwcp" event={"ID":"44b9d838-b920-4772-9e4f-c67a43af054e","Type":"ContainerStarted","Data":"b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc"} Jan 20 20:26:15 crc kubenswrapper[4948]: I0120 20:26:15.181442 4948 generic.go:334] "Generic (PLEG): container finished" podID="44b9d838-b920-4772-9e4f-c67a43af054e" containerID="b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc" exitCode=0 Jan 20 20:26:15 crc kubenswrapper[4948]: I0120 20:26:15.181598 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmwcp" event={"ID":"44b9d838-b920-4772-9e4f-c67a43af054e","Type":"ContainerDied","Data":"b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc"} Jan 20 20:26:16 crc kubenswrapper[4948]: I0120 20:26:16.191512 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmwcp" event={"ID":"44b9d838-b920-4772-9e4f-c67a43af054e","Type":"ContainerStarted","Data":"639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe"} Jan 20 20:26:16 crc kubenswrapper[4948]: I0120 20:26:16.215649 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xmwcp" podStartSLOduration=2.740652243 podStartE2EDuration="5.215606522s" podCreationTimestamp="2026-01-20 20:26:11 +0000 UTC" firstStartedPulling="2026-01-20 20:26:13.160809417 +0000 UTC m=+2201.111534386" lastFinishedPulling="2026-01-20 20:26:15.635763696 +0000 UTC m=+2203.586488665" observedRunningTime="2026-01-20 20:26:16.2148294 +0000 UTC m=+2204.165554369" watchObservedRunningTime="2026-01-20 20:26:16.215606522 +0000 UTC 
m=+2204.166331491" Jan 20 20:26:20 crc kubenswrapper[4948]: I0120 20:26:20.249675 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:26:20 crc kubenswrapper[4948]: I0120 20:26:20.250272 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:26:22 crc kubenswrapper[4948]: I0120 20:26:22.300189 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:22 crc kubenswrapper[4948]: I0120 20:26:22.300526 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:22 crc kubenswrapper[4948]: I0120 20:26:22.350673 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:23 crc kubenswrapper[4948]: I0120 20:26:23.293246 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:23 crc kubenswrapper[4948]: I0120 20:26:23.344069 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmwcp"] Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.261816 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xmwcp" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" containerName="registry-server" containerID="cri-o://639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe" gracePeriod=2 Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.691079 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.774492 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwlld\" (UniqueName: \"kubernetes.io/projected/44b9d838-b920-4772-9e4f-c67a43af054e-kube-api-access-cwlld\") pod \"44b9d838-b920-4772-9e4f-c67a43af054e\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.774547 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-catalog-content\") pod \"44b9d838-b920-4772-9e4f-c67a43af054e\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.774625 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-utilities\") pod \"44b9d838-b920-4772-9e4f-c67a43af054e\" (UID: \"44b9d838-b920-4772-9e4f-c67a43af054e\") " Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.776294 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-utilities" (OuterVolumeSpecName: "utilities") pod "44b9d838-b920-4772-9e4f-c67a43af054e" (UID: "44b9d838-b920-4772-9e4f-c67a43af054e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.782541 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44b9d838-b920-4772-9e4f-c67a43af054e-kube-api-access-cwlld" (OuterVolumeSpecName: "kube-api-access-cwlld") pod "44b9d838-b920-4772-9e4f-c67a43af054e" (UID: "44b9d838-b920-4772-9e4f-c67a43af054e"). InnerVolumeSpecName "kube-api-access-cwlld". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.802861 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "44b9d838-b920-4772-9e4f-c67a43af054e" (UID: "44b9d838-b920-4772-9e4f-c67a43af054e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.876425 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.876469 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwlld\" (UniqueName: \"kubernetes.io/projected/44b9d838-b920-4772-9e4f-c67a43af054e-kube-api-access-cwlld\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:25 crc kubenswrapper[4948]: I0120 20:26:25.876483 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b9d838-b920-4772-9e4f-c67a43af054e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.272548 4948 generic.go:334] "Generic (PLEG): container finished" podID="44b9d838-b920-4772-9e4f-c67a43af054e" containerID="639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe" exitCode=0 Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.272601 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmwcp" event={"ID":"44b9d838-b920-4772-9e4f-c67a43af054e","Type":"ContainerDied","Data":"639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe"} Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.272636 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmwcp" event={"ID":"44b9d838-b920-4772-9e4f-c67a43af054e","Type":"ContainerDied","Data":"49972524009eae5218bb18708b4972ad2aa084b2eb669e8a561b9c7cbd6a6964"} Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.272657 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmwcp" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.272694 4948 scope.go:117] "RemoveContainer" containerID="639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.307271 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmwcp"] Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.317124 4948 scope.go:117] "RemoveContainer" containerID="b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.319289 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmwcp"] Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.336450 4948 scope.go:117] "RemoveContainer" containerID="cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.383194 4948 scope.go:117] "RemoveContainer" containerID="639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe" Jan 20 20:26:26 crc kubenswrapper[4948]: E0120 20:26:26.384501 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe\": container with ID starting with 639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe not found: ID does not exist" containerID="639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.384551 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe"} err="failed to get container status \"639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe\": rpc error: code = NotFound desc = could not find container \"639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe\": container with ID starting with 639095ff056dd021de4464246fd6ae6b546d0590813daca6c49fd440b3a47cfe not found: ID does not exist" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.384577 4948 scope.go:117] "RemoveContainer" containerID="b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc" Jan 20 20:26:26 crc kubenswrapper[4948]: E0120 20:26:26.384924 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc\": container with ID starting with b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc not found: ID does not exist" containerID="b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.384944 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc"} err="failed to get container status \"b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc\": rpc error: code = NotFound desc = could not find container \"b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc\": container with ID starting with b91439b1429649aad953a0e317f4374e9adaaa93a6c657bde907ccb2ee8e6dfc not found: ID does not exist" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.384957 4948 scope.go:117] "RemoveContainer" 
containerID="cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2" Jan 20 20:26:26 crc kubenswrapper[4948]: E0120 20:26:26.385156 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2\": container with ID starting with cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2 not found: ID does not exist" containerID="cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.385191 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2"} err="failed to get container status \"cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2\": rpc error: code = NotFound desc = could not find container \"cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2\": container with ID starting with cf678c9cdf67c0c1aa64c132262baae2dac07ff875097e1a874e81d30ee10cf2 not found: ID does not exist" Jan 20 20:26:26 crc kubenswrapper[4948]: I0120 20:26:26.581756 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" path="/var/lib/kubelet/pods/44b9d838-b920-4772-9e4f-c67a43af054e/volumes" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.155528 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hwnnd"] Jan 20 20:26:32 crc kubenswrapper[4948]: E0120 20:26:32.156477 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" containerName="registry-server" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.156512 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" containerName="registry-server" Jan 20 20:26:32 crc kubenswrapper[4948]: E0120 20:26:32.156546 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" containerName="extract-utilities" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.156562 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" containerName="extract-utilities" Jan 20 20:26:32 crc kubenswrapper[4948]: E0120 20:26:32.156613 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" containerName="extract-content" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.156626 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" containerName="extract-content" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.156991 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="44b9d838-b920-4772-9e4f-c67a43af054e" containerName="registry-server" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.159242 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.167363 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hwnnd"] Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.238793 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sqf9\" (UniqueName: \"kubernetes.io/projected/e276c3f3-7213-4558-8590-08a781d304f5-kube-api-access-7sqf9\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.238914 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-catalog-content\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.238988 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-utilities\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.341210 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sqf9\" (UniqueName: \"kubernetes.io/projected/e276c3f3-7213-4558-8590-08a781d304f5-kube-api-access-7sqf9\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.341323 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-catalog-content\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.341427 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-utilities\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.342138 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-catalog-content\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.342163 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-utilities\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.361923 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7sqf9\" (UniqueName: \"kubernetes.io/projected/e276c3f3-7213-4558-8590-08a781d304f5-kube-api-access-7sqf9\") pod \"certified-operators-hwnnd\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:32 crc kubenswrapper[4948]: I0120 20:26:32.489985 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:33 crc kubenswrapper[4948]: I0120 20:26:33.051792 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hwnnd"] Jan 20 20:26:33 crc kubenswrapper[4948]: I0120 20:26:33.330532 4948 generic.go:334] "Generic (PLEG): container finished" podID="e276c3f3-7213-4558-8590-08a781d304f5" containerID="1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331" exitCode=0 Jan 20 20:26:33 crc kubenswrapper[4948]: I0120 20:26:33.330605 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hwnnd" event={"ID":"e276c3f3-7213-4558-8590-08a781d304f5","Type":"ContainerDied","Data":"1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331"} Jan 20 20:26:33 crc kubenswrapper[4948]: I0120 20:26:33.330838 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hwnnd" event={"ID":"e276c3f3-7213-4558-8590-08a781d304f5","Type":"ContainerStarted","Data":"8ac2f65c330e9f7da470d2f1592bce793003662934ae95869e4e071e20f58588"} Jan 20 20:26:34 crc kubenswrapper[4948]: I0120 20:26:34.343921 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hwnnd" event={"ID":"e276c3f3-7213-4558-8590-08a781d304f5","Type":"ContainerStarted","Data":"d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4"} Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.347552 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bsvk6"] Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.349886 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.365120 4948 generic.go:334] "Generic (PLEG): container finished" podID="e276c3f3-7213-4558-8590-08a781d304f5" containerID="d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4" exitCode=0 Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.365179 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hwnnd" event={"ID":"e276c3f3-7213-4558-8590-08a781d304f5","Type":"ContainerDied","Data":"d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4"} Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.379304 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bsvk6"] Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.492177 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-utilities\") pod \"community-operators-bsvk6\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.492339 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-catalog-content\") pod \"community-operators-bsvk6\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.492380 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrg9m\" (UniqueName: \"kubernetes.io/projected/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-kube-api-access-wrg9m\") pod \"community-operators-bsvk6\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.594349 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrg9m\" (UniqueName: \"kubernetes.io/projected/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-kube-api-access-wrg9m\") pod \"community-operators-bsvk6\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.594829 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-utilities\") pod \"community-operators-bsvk6\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.595103 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-catalog-content\") pod \"community-operators-bsvk6\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.595732 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-catalog-content\") pod \"community-operators-bsvk6\" 
(UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.596231 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-utilities\") pod \"community-operators-bsvk6\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.618628 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrg9m\" (UniqueName: \"kubernetes.io/projected/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-kube-api-access-wrg9m\") pod \"community-operators-bsvk6\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:36 crc kubenswrapper[4948]: I0120 20:26:36.734561 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:37 crc kubenswrapper[4948]: I0120 20:26:37.135926 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bsvk6"] Jan 20 20:26:37 crc kubenswrapper[4948]: I0120 20:26:37.374056 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsvk6" event={"ID":"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a","Type":"ContainerStarted","Data":"a066e83b416225380fb0fe83695acd73fda43fdeb07aaddf137f21fbb218ef2a"} Jan 20 20:26:38 crc kubenswrapper[4948]: I0120 20:26:38.384655 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hwnnd" event={"ID":"e276c3f3-7213-4558-8590-08a781d304f5","Type":"ContainerStarted","Data":"f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6"} Jan 20 20:26:38 crc kubenswrapper[4948]: I0120 20:26:38.388838 4948 generic.go:334] "Generic (PLEG): container finished" podID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerID="526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675" exitCode=0 Jan 20 20:26:38 crc kubenswrapper[4948]: I0120 20:26:38.388897 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsvk6" event={"ID":"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a","Type":"ContainerDied","Data":"526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675"} Jan 20 20:26:38 crc kubenswrapper[4948]: I0120 20:26:38.404995 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hwnnd" podStartSLOduration=1.76879589 podStartE2EDuration="6.404978312s" podCreationTimestamp="2026-01-20 20:26:32 +0000 UTC" firstStartedPulling="2026-01-20 20:26:33.332761297 +0000 UTC m=+2221.283486266" lastFinishedPulling="2026-01-20 20:26:37.968943719 +0000 UTC m=+2225.919668688" observedRunningTime="2026-01-20 20:26:38.403868421 +0000 UTC m=+2226.354593390" watchObservedRunningTime="2026-01-20 20:26:38.404978312 +0000 UTC m=+2226.355703281" Jan 20 20:26:39 crc kubenswrapper[4948]: I0120 20:26:39.399723 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsvk6" event={"ID":"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a","Type":"ContainerStarted","Data":"9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda"} Jan 20 20:26:41 crc kubenswrapper[4948]: I0120 20:26:41.419490 4948 generic.go:334] "Generic (PLEG): container finished" 
podID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerID="9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda" exitCode=0 Jan 20 20:26:41 crc kubenswrapper[4948]: I0120 20:26:41.419574 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsvk6" event={"ID":"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a","Type":"ContainerDied","Data":"9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda"} Jan 20 20:26:42 crc kubenswrapper[4948]: I0120 20:26:42.432332 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsvk6" event={"ID":"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a","Type":"ContainerStarted","Data":"10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f"} Jan 20 20:26:42 crc kubenswrapper[4948]: I0120 20:26:42.458074 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bsvk6" podStartSLOduration=2.889435488 podStartE2EDuration="6.458049715s" podCreationTimestamp="2026-01-20 20:26:36 +0000 UTC" firstStartedPulling="2026-01-20 20:26:38.390627313 +0000 UTC m=+2226.341352282" lastFinishedPulling="2026-01-20 20:26:41.95924153 +0000 UTC m=+2229.909966509" observedRunningTime="2026-01-20 20:26:42.453059853 +0000 UTC m=+2230.403784822" watchObservedRunningTime="2026-01-20 20:26:42.458049715 +0000 UTC m=+2230.408774684" Jan 20 20:26:42 crc kubenswrapper[4948]: I0120 20:26:42.490760 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:42 crc kubenswrapper[4948]: I0120 20:26:42.490824 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:42 crc kubenswrapper[4948]: I0120 20:26:42.536930 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:43 crc kubenswrapper[4948]: I0120 20:26:43.499675 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:45 crc kubenswrapper[4948]: I0120 20:26:45.326943 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hwnnd"] Jan 20 20:26:45 crc kubenswrapper[4948]: I0120 20:26:45.459397 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hwnnd" podUID="e276c3f3-7213-4558-8590-08a781d304f5" containerName="registry-server" containerID="cri-o://f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6" gracePeriod=2 Jan 20 20:26:45 crc kubenswrapper[4948]: I0120 20:26:45.921985 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.120901 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-utilities\") pod \"e276c3f3-7213-4558-8590-08a781d304f5\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.120987 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sqf9\" (UniqueName: \"kubernetes.io/projected/e276c3f3-7213-4558-8590-08a781d304f5-kube-api-access-7sqf9\") pod \"e276c3f3-7213-4558-8590-08a781d304f5\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.121116 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-catalog-content\") pod \"e276c3f3-7213-4558-8590-08a781d304f5\" (UID: \"e276c3f3-7213-4558-8590-08a781d304f5\") " Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.121928 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-utilities" (OuterVolumeSpecName: "utilities") pod "e276c3f3-7213-4558-8590-08a781d304f5" (UID: "e276c3f3-7213-4558-8590-08a781d304f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.129462 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e276c3f3-7213-4558-8590-08a781d304f5-kube-api-access-7sqf9" (OuterVolumeSpecName: "kube-api-access-7sqf9") pod "e276c3f3-7213-4558-8590-08a781d304f5" (UID: "e276c3f3-7213-4558-8590-08a781d304f5"). InnerVolumeSpecName "kube-api-access-7sqf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.175793 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e276c3f3-7213-4558-8590-08a781d304f5" (UID: "e276c3f3-7213-4558-8590-08a781d304f5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.224007 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sqf9\" (UniqueName: \"kubernetes.io/projected/e276c3f3-7213-4558-8590-08a781d304f5-kube-api-access-7sqf9\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.224056 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.224067 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e276c3f3-7213-4558-8590-08a781d304f5-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.470771 4948 generic.go:334] "Generic (PLEG): container finished" podID="e276c3f3-7213-4558-8590-08a781d304f5" containerID="f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6" exitCode=0 Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.470810 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hwnnd" event={"ID":"e276c3f3-7213-4558-8590-08a781d304f5","Type":"ContainerDied","Data":"f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6"} Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.470833 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hwnnd" event={"ID":"e276c3f3-7213-4558-8590-08a781d304f5","Type":"ContainerDied","Data":"8ac2f65c330e9f7da470d2f1592bce793003662934ae95869e4e071e20f58588"} Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.470844 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hwnnd" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.470854 4948 scope.go:117] "RemoveContainer" containerID="f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.495635 4948 scope.go:117] "RemoveContainer" containerID="d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.519245 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hwnnd"] Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.551909 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hwnnd"] Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.559626 4948 scope.go:117] "RemoveContainer" containerID="1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.583079 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e276c3f3-7213-4558-8590-08a781d304f5" path="/var/lib/kubelet/pods/e276c3f3-7213-4558-8590-08a781d304f5/volumes" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.588493 4948 scope.go:117] "RemoveContainer" containerID="f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6" Jan 20 20:26:46 crc kubenswrapper[4948]: E0120 20:26:46.589087 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6\": container with ID starting with f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6 not found: ID does not exist" containerID="f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.589295 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6"} err="failed to get container status \"f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6\": rpc error: code = NotFound desc = could not find container \"f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6\": container with ID starting with f735786920dde07d2765aa0aaf2afdcc4d039155e697232d54b30c3a10fd6de6 not found: ID does not exist" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.589321 4948 scope.go:117] "RemoveContainer" containerID="d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4" Jan 20 20:26:46 crc kubenswrapper[4948]: E0120 20:26:46.589631 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4\": container with ID starting with d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4 not found: ID does not exist" containerID="d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.589665 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4"} err="failed to get container status \"d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4\": rpc error: code = NotFound desc = could not find container 
\"d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4\": container with ID starting with d4978934a271b6b53cc5d28f376ad9e11b8bb99095a7314e08ab81397d727fd4 not found: ID does not exist" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.589724 4948 scope.go:117] "RemoveContainer" containerID="1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331" Jan 20 20:26:46 crc kubenswrapper[4948]: E0120 20:26:46.589970 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331\": container with ID starting with 1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331 not found: ID does not exist" containerID="1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.590018 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331"} err="failed to get container status \"1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331\": rpc error: code = NotFound desc = could not find container \"1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331\": container with ID starting with 1e8f6c40dab9004c0abe3fc44400c74158076f07bdc04d07903db6d34317f331 not found: ID does not exist" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.734668 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.734746 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:46 crc kubenswrapper[4948]: I0120 20:26:46.778744 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:47 crc kubenswrapper[4948]: I0120 20:26:47.532729 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:48 crc kubenswrapper[4948]: I0120 20:26:48.952425 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bsvk6"] Jan 20 20:26:49 crc kubenswrapper[4948]: I0120 20:26:49.496151 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bsvk6" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerName="registry-server" containerID="cri-o://10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f" gracePeriod=2 Jan 20 20:26:49 crc kubenswrapper[4948]: I0120 20:26:49.960924 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.101938 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-utilities\") pod \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.102181 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-catalog-content\") pod \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.102254 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrg9m\" (UniqueName: \"kubernetes.io/projected/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-kube-api-access-wrg9m\") pod \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\" (UID: \"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a\") " Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.102971 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-utilities" (OuterVolumeSpecName: "utilities") pod "d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" (UID: "d2272dc4-8e28-43a2-aeb4-bacf4c03d80a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.111906 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-kube-api-access-wrg9m" (OuterVolumeSpecName: "kube-api-access-wrg9m") pod "d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" (UID: "d2272dc4-8e28-43a2-aeb4-bacf4c03d80a"). InnerVolumeSpecName "kube-api-access-wrg9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.159143 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" (UID: "d2272dc4-8e28-43a2-aeb4-bacf4c03d80a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.205173 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.205456 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrg9m\" (UniqueName: \"kubernetes.io/projected/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-kube-api-access-wrg9m\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.205520 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.250393 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.250455 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.250514 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.251420 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.251497 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" gracePeriod=600 Jan 20 20:26:50 crc kubenswrapper[4948]: E0120 20:26:50.383326 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.508469 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" exitCode=0 Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.508517 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75"} Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.508575 4948 scope.go:117] "RemoveContainer" containerID="5cbb7c8430f6645757313c4d6b374566eb7331d9daa136806f9655de7ed9b678" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.509391 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:26:50 crc kubenswrapper[4948]: E0120 20:26:50.510050 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.517382 4948 generic.go:334] "Generic (PLEG): container finished" podID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerID="10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f" exitCode=0 Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.517430 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsvk6" event={"ID":"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a","Type":"ContainerDied","Data":"10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f"} Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.517450 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bsvk6" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.517459 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bsvk6" event={"ID":"d2272dc4-8e28-43a2-aeb4-bacf4c03d80a","Type":"ContainerDied","Data":"a066e83b416225380fb0fe83695acd73fda43fdeb07aaddf137f21fbb218ef2a"} Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.546844 4948 scope.go:117] "RemoveContainer" containerID="10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.615096 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bsvk6"] Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.626041 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bsvk6"] Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.629325 4948 scope.go:117] "RemoveContainer" containerID="9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.660042 4948 scope.go:117] "RemoveContainer" containerID="526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.693865 4948 scope.go:117] "RemoveContainer" containerID="10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f" Jan 20 20:26:50 crc kubenswrapper[4948]: E0120 20:26:50.694460 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f\": container with ID starting with 
10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f not found: ID does not exist" containerID="10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.694500 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f"} err="failed to get container status \"10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f\": rpc error: code = NotFound desc = could not find container \"10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f\": container with ID starting with 10a01de580a93b918dbce3c4f6421d4faf75b065cfffcf249181b13b6097d15f not found: ID does not exist" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.694527 4948 scope.go:117] "RemoveContainer" containerID="9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda" Jan 20 20:26:50 crc kubenswrapper[4948]: E0120 20:26:50.695132 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda\": container with ID starting with 9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda not found: ID does not exist" containerID="9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.695224 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda"} err="failed to get container status \"9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda\": rpc error: code = NotFound desc = could not find container \"9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda\": container with ID starting with 9a52db0ed1de32333057701a87ea918499a93e18699c23320d82fcb29b7a1cda not found: ID does not exist" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.695249 4948 scope.go:117] "RemoveContainer" containerID="526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675" Jan 20 20:26:50 crc kubenswrapper[4948]: E0120 20:26:50.695559 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675\": container with ID starting with 526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675 not found: ID does not exist" containerID="526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675" Jan 20 20:26:50 crc kubenswrapper[4948]: I0120 20:26:50.695585 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675"} err="failed to get container status \"526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675\": rpc error: code = NotFound desc = could not find container \"526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675\": container with ID starting with 526e25ea62847700ccf94ad0d6fa7cc65f7d831bbb2c5c01bd37665736a76675 not found: ID does not exist" Jan 20 20:26:52 crc kubenswrapper[4948]: I0120 20:26:52.624741 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" path="/var/lib/kubelet/pods/d2272dc4-8e28-43a2-aeb4-bacf4c03d80a/volumes" Jan 20 20:27:02 crc kubenswrapper[4948]: I0120 20:27:02.579753 
4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:27:02 crc kubenswrapper[4948]: E0120 20:27:02.580890 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:27:17 crc kubenswrapper[4948]: I0120 20:27:17.569692 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:27:17 crc kubenswrapper[4948]: E0120 20:27:17.570409 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:27:31 crc kubenswrapper[4948]: I0120 20:27:31.575135 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:27:31 crc kubenswrapper[4948]: E0120 20:27:31.575974 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:27:46 crc kubenswrapper[4948]: I0120 20:27:46.570665 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:27:46 crc kubenswrapper[4948]: E0120 20:27:46.571458 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:28:00 crc kubenswrapper[4948]: I0120 20:28:00.570150 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:28:00 crc kubenswrapper[4948]: E0120 20:28:00.572198 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:28:13 crc kubenswrapper[4948]: I0120 20:28:13.571969 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:28:13 crc kubenswrapper[4948]: E0120 20:28:13.574813 4948 
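
The identical "back-off 5m0s" entries repeating on every sync above mean the container has reached the restart-backoff ceiling. Assuming the commonly documented kubelet parameters (10s initial delay, doubling per crash, capped at 5m), the per-restart delays would look like this sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial = 10 * time.Second // assumed initial backoff
		cap     = 5 * time.Minute  // the 5m0s ceiling quoted in the log
	)
	delay := initial
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("restart attempt %d: wait %s\n", attempt, delay)
		delay *= 2
		if delay > cap {
			delay = cap // further attempts stay pinned at the cap
		}
	}
}
```

After the sixth crash or so the delay is pinned at 5m, which is why the log shows the same message roughly every sync interval with no actual restart in between.
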
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:28:27 crc kubenswrapper[4948]: I0120 20:28:27.569684 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:28:27 crc kubenswrapper[4948]: E0120 20:28:27.570530 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:28:39 crc kubenswrapper[4948]: I0120 20:28:39.570315 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:28:39 crc kubenswrapper[4948]: E0120 20:28:39.570977 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:28:54 crc kubenswrapper[4948]: I0120 20:28:54.569976 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:28:54 crc kubenswrapper[4948]: E0120 20:28:54.570691 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:29:05 crc kubenswrapper[4948]: I0120 20:29:05.584259 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:29:05 crc kubenswrapper[4948]: E0120 20:29:05.585384 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:29:14 crc kubenswrapper[4948]: I0120 20:29:14.032901 4948 generic.go:334] "Generic (PLEG): container finished" podID="c6149a97-b5c3-4ec7-8b50-fc3a77843b48" containerID="2355f5c7b2ba86d20a78f4dcfed8c3a07f7766f7d5dafae020290001f2135a08" exitCode=0 Jan 20 20:29:14 crc kubenswrapper[4948]: I0120 20:29:14.032996 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" 
event={"ID":"c6149a97-b5c3-4ec7-8b50-fc3a77843b48","Type":"ContainerDied","Data":"2355f5c7b2ba86d20a78f4dcfed8c3a07f7766f7d5dafae020290001f2135a08"} Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.509118 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.635607 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam\") pod \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.635744 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwtwh\" (UniqueName: \"kubernetes.io/projected/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-kube-api-access-qwtwh\") pod \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.635774 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-inventory\") pod \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.635816 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-secret-0\") pod \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.635855 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-combined-ca-bundle\") pod \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.641565 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-kube-api-access-qwtwh" (OuterVolumeSpecName: "kube-api-access-qwtwh") pod "c6149a97-b5c3-4ec7-8b50-fc3a77843b48" (UID: "c6149a97-b5c3-4ec7-8b50-fc3a77843b48"). InnerVolumeSpecName "kube-api-access-qwtwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.644977 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "c6149a97-b5c3-4ec7-8b50-fc3a77843b48" (UID: "c6149a97-b5c3-4ec7-8b50-fc3a77843b48"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:29:15 crc kubenswrapper[4948]: E0120 20:29:15.671852 4948 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam podName:c6149a97-b5c3-4ec7-8b50-fc3a77843b48 nodeName:}" failed. No retries permitted until 2026-01-20 20:29:16.171811012 +0000 UTC m=+2384.122535981 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "ssh-key-openstack-edpm-ipam" (UniqueName: "kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam") pod "c6149a97-b5c3-4ec7-8b50-fc3a77843b48" (UID: "c6149a97-b5c3-4ec7-8b50-fc3a77843b48") : error deleting /var/lib/kubelet/pods/c6149a97-b5c3-4ec7-8b50-fc3a77843b48/volume-subpaths: remove /var/lib/kubelet/pods/c6149a97-b5c3-4ec7-8b50-fc3a77843b48/volume-subpaths: no such file or directory Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.675206 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "c6149a97-b5c3-4ec7-8b50-fc3a77843b48" (UID: "c6149a97-b5c3-4ec7-8b50-fc3a77843b48"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.676962 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-inventory" (OuterVolumeSpecName: "inventory") pod "c6149a97-b5c3-4ec7-8b50-fc3a77843b48" (UID: "c6149a97-b5c3-4ec7-8b50-fc3a77843b48"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.738615 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwtwh\" (UniqueName: \"kubernetes.io/projected/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-kube-api-access-qwtwh\") on node \"crc\" DevicePath \"\"" Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.738663 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.738676 4948 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:29:15 crc kubenswrapper[4948]: I0120 20:29:15.738684 4948 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.052122 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" event={"ID":"c6149a97-b5c3-4ec7-8b50-fc3a77843b48","Type":"ContainerDied","Data":"63906aa9cd03f24a299ed396d6d71eab039a5cb2e8752fb5d5c8d70fd3c08e05"} Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.052172 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63906aa9cd03f24a299ed396d6d71eab039a5cb2e8752fb5d5c8d70fd3c08e05" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.052249 4948 util.go:48] "No ready sandbox for pod can be found. 
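
The nestedpendingoperations error above is a non-idempotent cleanup in miniature: the volume-subpaths directory is already gone, yet its removal is reported as a failure, so the entire unmount is re-queued with a 500ms durationBeforeRetry (the subsequent TearDown lines show the retry succeeding). A tolerant sketch that treats a missing path as already cleaned up; this is a suggested pattern, not the kubelet's code:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// cleanupSubPaths removes a per-pod subpath directory, treating "already
// gone" as success instead of an error worth retrying.
func cleanupSubPaths(dir string) error {
	if err := os.Remove(dir); err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil // nothing to clean; the goal state is already met
		}
		return fmt.Errorf("error deleting %s: %w", dir, err)
	}
	return nil
}

func main() {
	// A path that does not exist: the sketch reports success rather than
	// forcing a retry cycle like the one logged above.
	fmt.Println(cleanupSubPaths("/nonexistent/volume-subpaths"))
}
```
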
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.177354 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p"] Jan 20 20:29:16 crc kubenswrapper[4948]: E0120 20:29:16.178237 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerName="registry-server" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178263 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerName="registry-server" Jan 20 20:29:16 crc kubenswrapper[4948]: E0120 20:29:16.178282 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e276c3f3-7213-4558-8590-08a781d304f5" containerName="registry-server" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178291 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e276c3f3-7213-4558-8590-08a781d304f5" containerName="registry-server" Jan 20 20:29:16 crc kubenswrapper[4948]: E0120 20:29:16.178319 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e276c3f3-7213-4558-8590-08a781d304f5" containerName="extract-content" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178327 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e276c3f3-7213-4558-8590-08a781d304f5" containerName="extract-content" Jan 20 20:29:16 crc kubenswrapper[4948]: E0120 20:29:16.178337 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerName="extract-utilities" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178345 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerName="extract-utilities" Jan 20 20:29:16 crc kubenswrapper[4948]: E0120 20:29:16.178363 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6149a97-b5c3-4ec7-8b50-fc3a77843b48" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178373 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6149a97-b5c3-4ec7-8b50-fc3a77843b48" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 20 20:29:16 crc kubenswrapper[4948]: E0120 20:29:16.178386 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e276c3f3-7213-4558-8590-08a781d304f5" containerName="extract-utilities" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178396 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e276c3f3-7213-4558-8590-08a781d304f5" containerName="extract-utilities" Jan 20 20:29:16 crc kubenswrapper[4948]: E0120 20:29:16.178406 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerName="extract-content" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178414 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerName="extract-content" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178670 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="e276c3f3-7213-4558-8590-08a781d304f5" containerName="registry-server" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.178691 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2272dc4-8e28-43a2-aeb4-bacf4c03d80a" containerName="registry-server" Jan 20 20:29:16 crc 
kubenswrapper[4948]: I0120 20:29:16.178741 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6149a97-b5c3-4ec7-8b50-fc3a77843b48" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.179536 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.185424 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.186021 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.186456 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.204235 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p"] Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.246848 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam\") pod \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\" (UID: \"c6149a97-b5c3-4ec7-8b50-fc3a77843b48\") " Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.247502 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45lcp\" (UniqueName: \"kubernetes.io/projected/4bb85740-d63d-4363-91af-c07eecf6ab45-kube-api-access-45lcp\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.247571 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.247675 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.247736 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.247885 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.247921 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.247994 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.248049 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.248100 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.253990 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "c6149a97-b5c3-4ec7-8b50-fc3a77843b48" (UID: "c6149a97-b5c3-4ec7-8b50-fc3a77843b48"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350346 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45lcp\" (UniqueName: \"kubernetes.io/projected/4bb85740-d63d-4363-91af-c07eecf6ab45-kube-api-access-45lcp\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350424 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350479 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350514 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350604 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350622 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350650 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350684 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350732 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.350830 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/c6149a97-b5c3-4ec7-8b50-fc3a77843b48-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.352136 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.354652 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.354861 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.355514 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.355579 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.356363 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.356988 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.357571 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.376353 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45lcp\" (UniqueName: \"kubernetes.io/projected/4bb85740-d63d-4363-91af-c07eecf6ab45-kube-api-access-45lcp\") pod \"nova-edpm-deployment-openstack-edpm-ipam-x5v8p\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.500036 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:29:16 crc kubenswrapper[4948]: I0120 20:29:16.570887 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:29:16 crc kubenswrapper[4948]: E0120 20:29:16.571187 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:29:17 crc kubenswrapper[4948]: I0120 20:29:17.083691 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p"] Jan 20 20:29:18 crc kubenswrapper[4948]: I0120 20:29:18.067652 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" event={"ID":"4bb85740-d63d-4363-91af-c07eecf6ab45","Type":"ContainerStarted","Data":"f9a650c3dd24b3987d22dcc29dec0842ef33386ca6ac31121b8648ab651be73f"} Jan 20 20:29:18 crc kubenswrapper[4948]: I0120 20:29:18.068296 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" event={"ID":"4bb85740-d63d-4363-91af-c07eecf6ab45","Type":"ContainerStarted","Data":"346997676b187c648d6ebfc29520e1e634f9679f0f193d9e7cd2771c97998b0a"} Jan 20 20:29:18 crc kubenswrapper[4948]: I0120 20:29:18.087239 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" podStartSLOduration=1.528494964 podStartE2EDuration="2.087212259s" podCreationTimestamp="2026-01-20 20:29:16 +0000 UTC" firstStartedPulling="2026-01-20 20:29:17.086345064 +0000 UTC m=+2385.037070023" lastFinishedPulling="2026-01-20 20:29:17.645062349 +0000 UTC m=+2385.595787318" observedRunningTime="2026-01-20 20:29:18.083045721 +0000 UTC m=+2386.033770700" watchObservedRunningTime="2026-01-20 20:29:18.087212259 +0000 UTC m=+2386.037937238" Jan 20 20:29:30 crc kubenswrapper[4948]: I0120 
20:29:30.570627 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:29:30 crc kubenswrapper[4948]: E0120 20:29:30.572458 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:29:42 crc kubenswrapper[4948]: I0120 20:29:42.576216 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:29:42 crc kubenswrapper[4948]: E0120 20:29:42.576969 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:29:54 crc kubenswrapper[4948]: I0120 20:29:54.570755 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:29:54 crc kubenswrapper[4948]: E0120 20:29:54.571600 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.157824 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr"] Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.159697 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.164105 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.199784 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr"] Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.199864 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.298498 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-config-volume\") pod \"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.298583 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m49tb\" (UniqueName: \"kubernetes.io/projected/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-kube-api-access-m49tb\") pod \"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.298822 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-secret-volume\") pod \"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.400510 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-secret-volume\") pod \"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.400885 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-config-volume\") pod \"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.401019 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m49tb\" (UniqueName: \"kubernetes.io/projected/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-kube-api-access-m49tb\") pod \"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.401627 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-config-volume\") pod 
\"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.412545 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-secret-volume\") pod \"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.429196 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m49tb\" (UniqueName: \"kubernetes.io/projected/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-kube-api-access-m49tb\") pod \"collect-profiles-29482350-4zccr\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:00 crc kubenswrapper[4948]: I0120 20:30:00.511447 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:01 crc kubenswrapper[4948]: I0120 20:30:01.025847 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr"] Jan 20 20:30:01 crc kubenswrapper[4948]: I0120 20:30:01.584411 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" event={"ID":"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce","Type":"ContainerStarted","Data":"b1206a9cb4a061f52f78edbbea417e7a061ddb4b34620f10d5cd118c80e2f879"} Jan 20 20:30:01 crc kubenswrapper[4948]: I0120 20:30:01.584736 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" event={"ID":"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce","Type":"ContainerStarted","Data":"4ddb8b5a1ddfa540affa712e1ea04005fc6d845f4ee1c2294a7e1fa37712a410"} Jan 20 20:30:02 crc kubenswrapper[4948]: I0120 20:30:02.599433 4948 generic.go:334] "Generic (PLEG): container finished" podID="9f3f8ed9-be72-49d7-a206-f8d00a49a5ce" containerID="b1206a9cb4a061f52f78edbbea417e7a061ddb4b34620f10d5cd118c80e2f879" exitCode=0 Jan 20 20:30:02 crc kubenswrapper[4948]: I0120 20:30:02.599613 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" event={"ID":"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce","Type":"ContainerDied","Data":"b1206a9cb4a061f52f78edbbea417e7a061ddb4b34620f10d5cd118c80e2f879"} Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.019183 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.177418 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-secret-volume\") pod \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.177525 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-config-volume\") pod \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.177818 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m49tb\" (UniqueName: \"kubernetes.io/projected/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-kube-api-access-m49tb\") pod \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\" (UID: \"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce\") " Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.178277 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-config-volume" (OuterVolumeSpecName: "config-volume") pod "9f3f8ed9-be72-49d7-a206-f8d00a49a5ce" (UID: "9f3f8ed9-be72-49d7-a206-f8d00a49a5ce"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.185013 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-kube-api-access-m49tb" (OuterVolumeSpecName: "kube-api-access-m49tb") pod "9f3f8ed9-be72-49d7-a206-f8d00a49a5ce" (UID: "9f3f8ed9-be72-49d7-a206-f8d00a49a5ce"). InnerVolumeSpecName "kube-api-access-m49tb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.205544 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9f3f8ed9-be72-49d7-a206-f8d00a49a5ce" (UID: "9f3f8ed9-be72-49d7-a206-f8d00a49a5ce"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.280516 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.280560 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.280576 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m49tb\" (UniqueName: \"kubernetes.io/projected/9f3f8ed9-be72-49d7-a206-f8d00a49a5ce-kube-api-access-m49tb\") on node \"crc\" DevicePath \"\"" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.619351 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" event={"ID":"9f3f8ed9-be72-49d7-a206-f8d00a49a5ce","Type":"ContainerDied","Data":"4ddb8b5a1ddfa540affa712e1ea04005fc6d845f4ee1c2294a7e1fa37712a410"} Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.619397 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4ddb8b5a1ddfa540affa712e1ea04005fc6d845f4ee1c2294a7e1fa37712a410" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.619459 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482350-4zccr" Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.752914 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"] Jan 20 20:30:04 crc kubenswrapper[4948]: I0120 20:30:04.768032 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482305-7r5qf"] Jan 20 20:30:06 crc kubenswrapper[4948]: I0120 20:30:06.585359 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d4764a2-50ea-421c-9d14-13189740a541" path="/var/lib/kubelet/pods/0d4764a2-50ea-421c-9d14-13189740a541/volumes" Jan 20 20:30:07 crc kubenswrapper[4948]: I0120 20:30:07.570428 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:30:07 crc kubenswrapper[4948]: E0120 20:30:07.570980 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:30:19 crc kubenswrapper[4948]: I0120 20:30:19.570559 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:30:19 crc kubenswrapper[4948]: E0120 20:30:19.571533 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:30:32 crc kubenswrapper[4948]: I0120 20:30:32.576159 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:30:32 crc kubenswrapper[4948]: E0120 20:30:32.577212 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:30:45 crc kubenswrapper[4948]: I0120 20:30:45.592455 4948 scope.go:117] "RemoveContainer" containerID="fee25ea7a9b28716b72c16edbca7af14b564a44ee895168fea54cb0273c2a921" Jan 20 20:30:47 crc kubenswrapper[4948]: I0120 20:30:47.570851 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:30:47 crc kubenswrapper[4948]: E0120 20:30:47.571658 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:31:01 crc kubenswrapper[4948]: I0120 20:31:01.570908 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:31:01 crc kubenswrapper[4948]: E0120 20:31:01.571539 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:31:15 crc kubenswrapper[4948]: I0120 20:31:15.571343 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:31:15 crc kubenswrapper[4948]: E0120 20:31:15.572949 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.134669 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-67sc6"] Jan 20 20:31:17 crc kubenswrapper[4948]: E0120 20:31:17.135601 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f3f8ed9-be72-49d7-a206-f8d00a49a5ce" containerName="collect-profiles" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.135619 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f3f8ed9-be72-49d7-a206-f8d00a49a5ce" containerName="collect-profiles" Jan 20 
20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.135896 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f3f8ed9-be72-49d7-a206-f8d00a49a5ce" containerName="collect-profiles" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.139690 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.163923 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-67sc6"] Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.195280 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txcb9\" (UniqueName: \"kubernetes.io/projected/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-kube-api-access-txcb9\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.195578 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-utilities\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.195658 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-catalog-content\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.297938 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txcb9\" (UniqueName: \"kubernetes.io/projected/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-kube-api-access-txcb9\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.298120 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-utilities\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.298164 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-catalog-content\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.298812 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-utilities\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.298838 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-catalog-content\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.330016 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txcb9\" (UniqueName: \"kubernetes.io/projected/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-kube-api-access-txcb9\") pod \"redhat-operators-67sc6\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.467831 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:17 crc kubenswrapper[4948]: I0120 20:31:17.975539 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-67sc6"] Jan 20 20:31:18 crc kubenswrapper[4948]: I0120 20:31:18.276279 4948 generic.go:334] "Generic (PLEG): container finished" podID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerID="67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805" exitCode=0 Jan 20 20:31:18 crc kubenswrapper[4948]: I0120 20:31:18.276467 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67sc6" event={"ID":"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b","Type":"ContainerDied","Data":"67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805"} Jan 20 20:31:18 crc kubenswrapper[4948]: I0120 20:31:18.277255 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67sc6" event={"ID":"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b","Type":"ContainerStarted","Data":"8383f20296e16d97a1ea27a9777824b05dc53392b92c03ec1dd41123f8100e8f"} Jan 20 20:31:18 crc kubenswrapper[4948]: I0120 20:31:18.278385 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:31:19 crc kubenswrapper[4948]: I0120 20:31:19.289349 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67sc6" event={"ID":"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b","Type":"ContainerStarted","Data":"9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd"} Jan 20 20:31:24 crc kubenswrapper[4948]: I0120 20:31:24.336677 4948 generic.go:334] "Generic (PLEG): container finished" podID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerID="9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd" exitCode=0 Jan 20 20:31:24 crc kubenswrapper[4948]: I0120 20:31:24.336740 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67sc6" event={"ID":"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b","Type":"ContainerDied","Data":"9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd"} Jan 20 20:31:25 crc kubenswrapper[4948]: I0120 20:31:25.349519 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67sc6" event={"ID":"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b","Type":"ContainerStarted","Data":"fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58"} Jan 20 20:31:25 crc kubenswrapper[4948]: I0120 20:31:25.372492 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-67sc6" podStartSLOduration=1.7336905 podStartE2EDuration="8.372474128s" podCreationTimestamp="2026-01-20 20:31:17 +0000 UTC" 
firstStartedPulling="2026-01-20 20:31:18.278100457 +0000 UTC m=+2506.228825426" lastFinishedPulling="2026-01-20 20:31:24.916884075 +0000 UTC m=+2512.867609054" observedRunningTime="2026-01-20 20:31:25.370332107 +0000 UTC m=+2513.321057096" watchObservedRunningTime="2026-01-20 20:31:25.372474128 +0000 UTC m=+2513.323199097" Jan 20 20:31:27 crc kubenswrapper[4948]: I0120 20:31:27.468214 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:27 crc kubenswrapper[4948]: I0120 20:31:27.468598 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:28 crc kubenswrapper[4948]: I0120 20:31:28.510926 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-67sc6" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="registry-server" probeResult="failure" output=< Jan 20 20:31:28 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 20:31:28 crc kubenswrapper[4948]: > Jan 20 20:31:30 crc kubenswrapper[4948]: I0120 20:31:30.575685 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:31:30 crc kubenswrapper[4948]: E0120 20:31:30.576261 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:31:37 crc kubenswrapper[4948]: I0120 20:31:37.511428 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:37 crc kubenswrapper[4948]: I0120 20:31:37.564099 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:37 crc kubenswrapper[4948]: I0120 20:31:37.755688 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-67sc6"] Jan 20 20:31:39 crc kubenswrapper[4948]: I0120 20:31:39.478749 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-67sc6" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="registry-server" containerID="cri-o://fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58" gracePeriod=2 Jan 20 20:31:39 crc kubenswrapper[4948]: I0120 20:31:39.969207 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.094125 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-catalog-content\") pod \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.094273 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txcb9\" (UniqueName: \"kubernetes.io/projected/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-kube-api-access-txcb9\") pod \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.094360 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-utilities\") pod \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\" (UID: \"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b\") " Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.095346 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-utilities" (OuterVolumeSpecName: "utilities") pod "bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" (UID: "bd12cbb5-30a9-49d2-98e6-9c2e87a3640b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.100924 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-kube-api-access-txcb9" (OuterVolumeSpecName: "kube-api-access-txcb9") pod "bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" (UID: "bd12cbb5-30a9-49d2-98e6-9c2e87a3640b"). InnerVolumeSpecName "kube-api-access-txcb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.196730 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txcb9\" (UniqueName: \"kubernetes.io/projected/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-kube-api-access-txcb9\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.196765 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.224315 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" (UID: "bd12cbb5-30a9-49d2-98e6-9c2e87a3640b"). InnerVolumeSpecName "catalog-content". 
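The startup-probe failure above ("timeout: failed to connect service \":50051\" within 1s") is the catalog pod's gRPC health check timing out while registry-server is still loading catalog content. A rough equivalent of that check, using a plain TCP dial with a 1s deadline in place of the real gRPC health RPC; the port and timeout come from the log, everything else is illustrative:

package main

import (
	"fmt"
	"net"
	"time"
)

// probe attempts a TCP connection within the given timeout, standing in
// for the gRPC health check the registry-server probe performs.
func probe(addr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return fmt.Errorf("timeout: failed to connect service %q within %v", addr, timeout)
	}
	conn.Close()
	return nil
}

func main() {
	if err := probe("127.0.0.1:50051", 1*time.Second); err != nil {
		fmt.Println(err) // unhealthy: kubelet retries until the startup probe passes
	} else {
		fmt.Println("probe ok")
	}
}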
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.298903 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.494535 4948 generic.go:334] "Generic (PLEG): container finished" podID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerID="fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58" exitCode=0 Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.494583 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67sc6" event={"ID":"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b","Type":"ContainerDied","Data":"fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58"} Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.494613 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-67sc6" event={"ID":"bd12cbb5-30a9-49d2-98e6-9c2e87a3640b","Type":"ContainerDied","Data":"8383f20296e16d97a1ea27a9777824b05dc53392b92c03ec1dd41123f8100e8f"} Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.494632 4948 scope.go:117] "RemoveContainer" containerID="fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.494629 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-67sc6" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.520435 4948 scope.go:117] "RemoveContainer" containerID="9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.545395 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-67sc6"] Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.553322 4948 scope.go:117] "RemoveContainer" containerID="67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.556661 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-67sc6"] Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.581082 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" path="/var/lib/kubelet/pods/bd12cbb5-30a9-49d2-98e6-9c2e87a3640b/volumes" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.596660 4948 scope.go:117] "RemoveContainer" containerID="fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58" Jan 20 20:31:40 crc kubenswrapper[4948]: E0120 20:31:40.599184 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58\": container with ID starting with fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58 not found: ID does not exist" containerID="fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.599233 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58"} err="failed to get container status \"fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58\": rpc error: code = NotFound desc 
= could not find container \"fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58\": container with ID starting with fdf3f29fb092047b8ac3c0fc403272429a4ac3d003ba22ad23b17142da1fbc58 not found: ID does not exist" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.599266 4948 scope.go:117] "RemoveContainer" containerID="9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd" Jan 20 20:31:40 crc kubenswrapper[4948]: E0120 20:31:40.602123 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd\": container with ID starting with 9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd not found: ID does not exist" containerID="9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.602170 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd"} err="failed to get container status \"9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd\": rpc error: code = NotFound desc = could not find container \"9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd\": container with ID starting with 9b872e960f4db4dd839d27326cc583a5128ab62b33c79b37b401072897bd35bd not found: ID does not exist" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.602204 4948 scope.go:117] "RemoveContainer" containerID="67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805" Jan 20 20:31:40 crc kubenswrapper[4948]: E0120 20:31:40.602531 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805\": container with ID starting with 67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805 not found: ID does not exist" containerID="67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805" Jan 20 20:31:40 crc kubenswrapper[4948]: I0120 20:31:40.602555 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805"} err="failed to get container status \"67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805\": rpc error: code = NotFound desc = could not find container \"67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805\": container with ID starting with 67b0cff69d0ee3a3350bd29106c3d7c1b39b900bc0b2916408dc61966d792805 not found: ID does not exist" Jan 20 20:31:40 crc kubenswrapper[4948]: E0120 20:31:40.695572 4948 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd12cbb5_30a9_49d2_98e6_9c2e87a3640b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd12cbb5_30a9_49d2_98e6_9c2e87a3640b.slice/crio-8383f20296e16d97a1ea27a9777824b05dc53392b92c03ec1dd41123f8100e8f\": RecentStats: unable to find data in memory cache]" Jan 20 20:31:42 crc kubenswrapper[4948]: I0120 20:31:42.516670 4948 generic.go:334] "Generic (PLEG): container finished" podID="4bb85740-d63d-4363-91af-c07eecf6ab45" containerID="f9a650c3dd24b3987d22dcc29dec0842ef33386ca6ac31121b8648ab651be73f" exitCode=0 Jan 20 20:31:42 crc 
kubenswrapper[4948]: I0120 20:31:42.516736 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" event={"ID":"4bb85740-d63d-4363-91af-c07eecf6ab45","Type":"ContainerDied","Data":"f9a650c3dd24b3987d22dcc29dec0842ef33386ca6ac31121b8648ab651be73f"} Jan 20 20:31:42 crc kubenswrapper[4948]: I0120 20:31:42.580761 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:31:42 crc kubenswrapper[4948]: E0120 20:31:42.581180 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:31:43 crc kubenswrapper[4948]: I0120 20:31:43.976774 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078167 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-0\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078243 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45lcp\" (UniqueName: \"kubernetes.io/projected/4bb85740-d63d-4363-91af-c07eecf6ab45-kube-api-access-45lcp\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078305 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-combined-ca-bundle\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078364 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-1\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078397 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-inventory\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078486 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-0\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078532 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-1\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078620 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-ssh-key-openstack-edpm-ipam\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.078670 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-extra-config-0\") pod \"4bb85740-d63d-4363-91af-c07eecf6ab45\" (UID: \"4bb85740-d63d-4363-91af-c07eecf6ab45\") " Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.104848 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb85740-d63d-4363-91af-c07eecf6ab45-kube-api-access-45lcp" (OuterVolumeSpecName: "kube-api-access-45lcp") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "kube-api-access-45lcp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.107260 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.110882 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.112483 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.123127 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-inventory" (OuterVolumeSpecName: "inventory") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.125218 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.131867 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.152508 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.155871 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "4bb85740-d63d-4363-91af-c07eecf6ab45" (UID: "4bb85740-d63d-4363-91af-c07eecf6ab45"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181629 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181672 4948 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181684 4948 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181693 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181714 4948 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181724 4948 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181732 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45lcp\" (UniqueName: \"kubernetes.io/projected/4bb85740-d63d-4363-91af-c07eecf6ab45-kube-api-access-45lcp\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181741 4948 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.181749 4948 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/4bb85740-d63d-4363-91af-c07eecf6ab45-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.536228 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" event={"ID":"4bb85740-d63d-4363-91af-c07eecf6ab45","Type":"ContainerDied","Data":"346997676b187c648d6ebfc29520e1e634f9679f0f193d9e7cd2771c97998b0a"} Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.536278 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="346997676b187c648d6ebfc29520e1e634f9679f0f193d9e7cd2771c97998b0a" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.536313 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-x5v8p" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.670808 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b"] Jan 20 20:31:44 crc kubenswrapper[4948]: E0120 20:31:44.671525 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="extract-utilities" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.671545 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="extract-utilities" Jan 20 20:31:44 crc kubenswrapper[4948]: E0120 20:31:44.671554 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="registry-server" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.671561 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="registry-server" Jan 20 20:31:44 crc kubenswrapper[4948]: E0120 20:31:44.671575 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="extract-content" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.671581 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="extract-content" Jan 20 20:31:44 crc kubenswrapper[4948]: E0120 20:31:44.671609 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bb85740-d63d-4363-91af-c07eecf6ab45" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.671615 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bb85740-d63d-4363-91af-c07eecf6ab45" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.671825 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bb85740-d63d-4363-91af-c07eecf6ab45" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.671845 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd12cbb5-30a9-49d2-98e6-9c2e87a3640b" containerName="registry-server" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.672455 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.684592 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b"] Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.684799 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.684978 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.685210 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.687107 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.687327 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-kfwmn" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.795577 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.795644 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.795914 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.795974 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.796080 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4lnk\" (UniqueName: \"kubernetes.io/projected/28bbc15a-1085-4cbd-9dac-0180526816bc-kube-api-access-q4lnk\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc 
kubenswrapper[4948]: I0120 20:31:44.796136 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.796191 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.897614 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.897678 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4lnk\" (UniqueName: \"kubernetes.io/projected/28bbc15a-1085-4cbd-9dac-0180526816bc-kube-api-access-q4lnk\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.897723 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.897763 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.897851 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.897876 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ssh-key-openstack-edpm-ipam\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.897930 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.903552 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.903583 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.903737 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.904214 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.904856 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.906219 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.917131 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4lnk\" (UniqueName: \"kubernetes.io/projected/28bbc15a-1085-4cbd-9dac-0180526816bc-kube-api-access-q4lnk\") 
pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ht82b\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:44 crc kubenswrapper[4948]: I0120 20:31:44.991960 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:31:45 crc kubenswrapper[4948]: I0120 20:31:45.789511 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b"] Jan 20 20:31:46 crc kubenswrapper[4948]: I0120 20:31:46.587328 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" event={"ID":"28bbc15a-1085-4cbd-9dac-0180526816bc","Type":"ContainerStarted","Data":"2dc0a58b99a6177fb278dcde7dcf7c463f8ee58b08d724e1f8d2fe9a5f458530"} Jan 20 20:31:46 crc kubenswrapper[4948]: I0120 20:31:46.588591 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" event={"ID":"28bbc15a-1085-4cbd-9dac-0180526816bc","Type":"ContainerStarted","Data":"09f7e35a8f2c8ea50387850274e8e81dfc150b9f1d0c868b1e9996c0f2c68e54"} Jan 20 20:31:46 crc kubenswrapper[4948]: I0120 20:31:46.622098 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" podStartSLOduration=2.184396102 podStartE2EDuration="2.622070214s" podCreationTimestamp="2026-01-20 20:31:44 +0000 UTC" firstStartedPulling="2026-01-20 20:31:45.812595288 +0000 UTC m=+2533.763320267" lastFinishedPulling="2026-01-20 20:31:46.25026941 +0000 UTC m=+2534.200994379" observedRunningTime="2026-01-20 20:31:46.609581908 +0000 UTC m=+2534.560306877" watchObservedRunningTime="2026-01-20 20:31:46.622070214 +0000 UTC m=+2534.572795193" Jan 20 20:31:56 crc kubenswrapper[4948]: I0120 20:31:56.570458 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:31:57 crc kubenswrapper[4948]: I0120 20:31:57.732057 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"934acfbdee878cbe138279fabb4eca853e3510e2798842469d941a73da9705e1"} Jan 20 20:34:20 crc kubenswrapper[4948]: I0120 20:34:20.249391 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:34:20 crc kubenswrapper[4948]: I0120 20:34:20.250781 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:34:50 crc kubenswrapper[4948]: I0120 20:34:50.250330 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:34:50 crc kubenswrapper[4948]: 
I0120 20:34:50.251227 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:34:53 crc kubenswrapper[4948]: I0120 20:34:53.658941 4948 generic.go:334] "Generic (PLEG): container finished" podID="28bbc15a-1085-4cbd-9dac-0180526816bc" containerID="2dc0a58b99a6177fb278dcde7dcf7c463f8ee58b08d724e1f8d2fe9a5f458530" exitCode=0 Jan 20 20:34:53 crc kubenswrapper[4948]: I0120 20:34:53.659130 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" event={"ID":"28bbc15a-1085-4cbd-9dac-0180526816bc","Type":"ContainerDied","Data":"2dc0a58b99a6177fb278dcde7dcf7c463f8ee58b08d724e1f8d2fe9a5f458530"} Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.177686 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.319573 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-inventory\") pod \"28bbc15a-1085-4cbd-9dac-0180526816bc\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.319728 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-1\") pod \"28bbc15a-1085-4cbd-9dac-0180526816bc\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.319854 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4lnk\" (UniqueName: \"kubernetes.io/projected/28bbc15a-1085-4cbd-9dac-0180526816bc-kube-api-access-q4lnk\") pod \"28bbc15a-1085-4cbd-9dac-0180526816bc\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.319920 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ssh-key-openstack-edpm-ipam\") pod \"28bbc15a-1085-4cbd-9dac-0180526816bc\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.320055 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-telemetry-combined-ca-bundle\") pod \"28bbc15a-1085-4cbd-9dac-0180526816bc\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.320109 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-2\") pod \"28bbc15a-1085-4cbd-9dac-0180526816bc\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.320240 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-0\") pod \"28bbc15a-1085-4cbd-9dac-0180526816bc\" (UID: \"28bbc15a-1085-4cbd-9dac-0180526816bc\") " Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.331921 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28bbc15a-1085-4cbd-9dac-0180526816bc-kube-api-access-q4lnk" (OuterVolumeSpecName: "kube-api-access-q4lnk") pod "28bbc15a-1085-4cbd-9dac-0180526816bc" (UID: "28bbc15a-1085-4cbd-9dac-0180526816bc"). InnerVolumeSpecName "kube-api-access-q4lnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.338690 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "28bbc15a-1085-4cbd-9dac-0180526816bc" (UID: "28bbc15a-1085-4cbd-9dac-0180526816bc"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.370791 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-inventory" (OuterVolumeSpecName: "inventory") pod "28bbc15a-1085-4cbd-9dac-0180526816bc" (UID: "28bbc15a-1085-4cbd-9dac-0180526816bc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.374020 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "28bbc15a-1085-4cbd-9dac-0180526816bc" (UID: "28bbc15a-1085-4cbd-9dac-0180526816bc"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.374968 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "28bbc15a-1085-4cbd-9dac-0180526816bc" (UID: "28bbc15a-1085-4cbd-9dac-0180526816bc"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.386131 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "28bbc15a-1085-4cbd-9dac-0180526816bc" (UID: "28bbc15a-1085-4cbd-9dac-0180526816bc"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.405689 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "28bbc15a-1085-4cbd-9dac-0180526816bc" (UID: "28bbc15a-1085-4cbd-9dac-0180526816bc"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.422228 4948 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.422267 4948 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-inventory\") on node \"crc\" DevicePath \"\"" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.422279 4948 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.422289 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4lnk\" (UniqueName: \"kubernetes.io/projected/28bbc15a-1085-4cbd-9dac-0180526816bc-kube-api-access-q4lnk\") on node \"crc\" DevicePath \"\"" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.422300 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.422308 4948 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.422316 4948 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/28bbc15a-1085-4cbd-9dac-0180526816bc-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.675995 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" event={"ID":"28bbc15a-1085-4cbd-9dac-0180526816bc","Type":"ContainerDied","Data":"09f7e35a8f2c8ea50387850274e8e81dfc150b9f1d0c868b1e9996c0f2c68e54"} Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.676482 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09f7e35a8f2c8ea50387850274e8e81dfc150b9f1d0c868b1e9996c0f2c68e54" Jan 20 20:34:55 crc kubenswrapper[4948]: I0120 20:34:55.676132 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ht82b" Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.249626 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.250185 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.250232 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.251041 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"934acfbdee878cbe138279fabb4eca853e3510e2798842469d941a73da9705e1"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.251091 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://934acfbdee878cbe138279fabb4eca853e3510e2798842469d941a73da9705e1" gracePeriod=600 Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.891478 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="934acfbdee878cbe138279fabb4eca853e3510e2798842469d941a73da9705e1" exitCode=0 Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.891572 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"934acfbdee878cbe138279fabb4eca853e3510e2798842469d941a73da9705e1"} Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.892079 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f"} Jan 20 20:35:20 crc kubenswrapper[4948]: I0120 20:35:20.892300 4948 scope.go:117] "RemoveContainer" containerID="103dc17e17b32b0c5c3d3bc0b47e648415b499675bbfb5c4c2a56ac2a7505a75" Jan 20 20:35:38 crc kubenswrapper[4948]: I0120 20:35:38.994182 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Jan 20 20:35:38 crc kubenswrapper[4948]: E0120 20:35:38.995260 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28bbc15a-1085-4cbd-9dac-0180526816bc" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 20 20:35:38 crc kubenswrapper[4948]: I0120 20:35:38.995285 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="28bbc15a-1085-4cbd-9dac-0180526816bc" 
containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 20 20:35:38 crc kubenswrapper[4948]: I0120 20:35:38.995526 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="28bbc15a-1085-4cbd-9dac-0180526816bc" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 20 20:35:38 crc kubenswrapper[4948]: I0120 20:35:38.996470 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 20 20:35:38 crc kubenswrapper[4948]: I0120 20:35:38.999744 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.000054 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-skvjj" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.005687 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.006452 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.009143 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.074204 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.074242 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.074298 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-config-data\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.175872 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.175927 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggmkm\" (UniqueName: \"kubernetes.io/projected/84db0de1-b0d6-4a7f-88d8-6470a493ef78-kube-api-access-ggmkm\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.176051 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" 
(UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.176133 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.176158 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.176239 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.176271 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-config-data\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.176291 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.176313 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.177479 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.177657 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-config-data\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.185556 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: 
\"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.277862 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.277917 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.277981 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.278021 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggmkm\" (UniqueName: \"kubernetes.io/projected/84db0de1-b0d6-4a7f-88d8-6470a493ef78-kube-api-access-ggmkm\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.278072 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.278175 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.278524 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.279347 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.279360 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " 
pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.283682 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.286246 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.301300 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggmkm\" (UniqueName: \"kubernetes.io/projected/84db0de1-b0d6-4a7f-88d8-6470a493ef78-kube-api-access-ggmkm\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.311969 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") " pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.329147 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 20 20:35:39 crc kubenswrapper[4948]: I0120 20:35:39.918955 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 20 20:35:40 crc kubenswrapper[4948]: I0120 20:35:40.062629 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"84db0de1-b0d6-4a7f-88d8-6470a493ef78","Type":"ContainerStarted","Data":"745e1a6e3ae403d89258638a518025b2d805c20469f991c1a4cd1df71d28c300"} Jan 20 20:36:18 crc kubenswrapper[4948]: E0120 20:36:18.606409 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Jan 20 20:36:18 crc kubenswrapper[4948]: E0120 20:36:18.613614 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ggmkm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(84db0de1-b0d6-4a7f-88d8-6470a493ef78): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:36:18 crc kubenswrapper[4948]: E0120 20:36:18.615278 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="84db0de1-b0d6-4a7f-88d8-6470a493ef78" Jan 20 20:36:19 crc kubenswrapper[4948]: E0120 20:36:19.523881 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="84db0de1-b0d6-4a7f-88d8-6470a493ef78" Jan 20 20:36:30 crc kubenswrapper[4948]: I0120 20:36:30.575113 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:36:32 crc kubenswrapper[4948]: I0120 20:36:32.640332 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"84db0de1-b0d6-4a7f-88d8-6470a493ef78","Type":"ContainerStarted","Data":"4db02a5315b05e2428ad2343db2882c6c6dd8cbb2d71bb457537c6348090fccf"} Jan 20 20:36:32 crc kubenswrapper[4948]: I0120 20:36:32.671031 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.537875802 podStartE2EDuration="55.671004578s" podCreationTimestamp="2026-01-20 20:35:37 +0000 UTC" firstStartedPulling="2026-01-20 20:35:39.928014074 +0000 UTC m=+2767.878739033" lastFinishedPulling="2026-01-20 20:36:31.06114284 +0000 UTC m=+2819.011867809" observedRunningTime="2026-01-20 20:36:32.659117223 +0000 UTC m=+2820.609842202" watchObservedRunningTime="2026-01-20 20:36:32.671004578 +0000 UTC m=+2820.621729547" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.749891 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dh787"] Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.761220 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.762783 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dh787"] Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.883082 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrcz2\" (UniqueName: \"kubernetes.io/projected/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-kube-api-access-wrcz2\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.883137 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-utilities\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.883176 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-catalog-content\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.984842 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrcz2\" (UniqueName: \"kubernetes.io/projected/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-kube-api-access-wrcz2\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.984939 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-utilities\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.984981 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-catalog-content\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.985657 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-catalog-content\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:37 crc kubenswrapper[4948]: I0120 20:36:37.985689 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-utilities\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:38 crc kubenswrapper[4948]: I0120 20:36:38.022321 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wrcz2\" (UniqueName: \"kubernetes.io/projected/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-kube-api-access-wrcz2\") pod \"redhat-marketplace-dh787\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:38 crc kubenswrapper[4948]: I0120 20:36:38.104587 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:38 crc kubenswrapper[4948]: I0120 20:36:38.688462 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dh787"] Jan 20 20:36:39 crc kubenswrapper[4948]: I0120 20:36:39.716287 4948 generic.go:334] "Generic (PLEG): container finished" podID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerID="b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60" exitCode=0 Jan 20 20:36:39 crc kubenswrapper[4948]: I0120 20:36:39.716537 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dh787" event={"ID":"e64bcc16-fd71-42a1-a94d-95f99d6c5d21","Type":"ContainerDied","Data":"b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60"} Jan 20 20:36:39 crc kubenswrapper[4948]: I0120 20:36:39.716568 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dh787" event={"ID":"e64bcc16-fd71-42a1-a94d-95f99d6c5d21","Type":"ContainerStarted","Data":"8aef536b2d20a6458306dabb35f5cbab20b3d59dc7fda5f1c4ad5a1f29710e8b"} Jan 20 20:36:40 crc kubenswrapper[4948]: I0120 20:36:40.726475 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dh787" event={"ID":"e64bcc16-fd71-42a1-a94d-95f99d6c5d21","Type":"ContainerStarted","Data":"5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1"} Jan 20 20:36:41 crc kubenswrapper[4948]: I0120 20:36:41.736090 4948 generic.go:334] "Generic (PLEG): container finished" podID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerID="5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1" exitCode=0 Jan 20 20:36:41 crc kubenswrapper[4948]: I0120 20:36:41.736133 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dh787" event={"ID":"e64bcc16-fd71-42a1-a94d-95f99d6c5d21","Type":"ContainerDied","Data":"5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1"} Jan 20 20:36:42 crc kubenswrapper[4948]: I0120 20:36:42.747216 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dh787" event={"ID":"e64bcc16-fd71-42a1-a94d-95f99d6c5d21","Type":"ContainerStarted","Data":"62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586"} Jan 20 20:36:42 crc kubenswrapper[4948]: I0120 20:36:42.768481 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dh787" podStartSLOduration=3.311648743 podStartE2EDuration="5.768463687s" podCreationTimestamp="2026-01-20 20:36:37 +0000 UTC" firstStartedPulling="2026-01-20 20:36:39.719568789 +0000 UTC m=+2827.670293758" lastFinishedPulling="2026-01-20 20:36:42.176383733 +0000 UTC m=+2830.127108702" observedRunningTime="2026-01-20 20:36:42.763303961 +0000 UTC m=+2830.714028930" watchObservedRunningTime="2026-01-20 20:36:42.768463687 +0000 UTC m=+2830.719188656" Jan 20 20:36:48 crc kubenswrapper[4948]: I0120 20:36:48.105619 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:48 crc kubenswrapper[4948]: I0120 20:36:48.106799 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:48 crc kubenswrapper[4948]: I0120 20:36:48.156531 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:48 crc kubenswrapper[4948]: I0120 20:36:48.855470 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:48 crc kubenswrapper[4948]: I0120 20:36:48.922397 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dh787"] Jan 20 20:36:50 crc kubenswrapper[4948]: I0120 20:36:50.824158 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dh787" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerName="registry-server" containerID="cri-o://62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586" gracePeriod=2 Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.393647 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.563147 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-utilities\") pod \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.563280 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrcz2\" (UniqueName: \"kubernetes.io/projected/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-kube-api-access-wrcz2\") pod \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.563469 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-catalog-content\") pod \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\" (UID: \"e64bcc16-fd71-42a1-a94d-95f99d6c5d21\") " Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.564185 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-utilities" (OuterVolumeSpecName: "utilities") pod "e64bcc16-fd71-42a1-a94d-95f99d6c5d21" (UID: "e64bcc16-fd71-42a1-a94d-95f99d6c5d21"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.573928 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-kube-api-access-wrcz2" (OuterVolumeSpecName: "kube-api-access-wrcz2") pod "e64bcc16-fd71-42a1-a94d-95f99d6c5d21" (UID: "e64bcc16-fd71-42a1-a94d-95f99d6c5d21"). InnerVolumeSpecName "kube-api-access-wrcz2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.583204 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e64bcc16-fd71-42a1-a94d-95f99d6c5d21" (UID: "e64bcc16-fd71-42a1-a94d-95f99d6c5d21"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.665129 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.665164 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.665174 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrcz2\" (UniqueName: \"kubernetes.io/projected/e64bcc16-fd71-42a1-a94d-95f99d6c5d21-kube-api-access-wrcz2\") on node \"crc\" DevicePath \"\"" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.833370 4948 generic.go:334] "Generic (PLEG): container finished" podID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerID="62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586" exitCode=0 Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.833421 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dh787" event={"ID":"e64bcc16-fd71-42a1-a94d-95f99d6c5d21","Type":"ContainerDied","Data":"62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586"} Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.833451 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dh787" event={"ID":"e64bcc16-fd71-42a1-a94d-95f99d6c5d21","Type":"ContainerDied","Data":"8aef536b2d20a6458306dabb35f5cbab20b3d59dc7fda5f1c4ad5a1f29710e8b"} Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.833469 4948 scope.go:117] "RemoveContainer" containerID="62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.833612 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dh787" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.865163 4948 scope.go:117] "RemoveContainer" containerID="5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.871246 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dh787"] Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.883946 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dh787"] Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.893192 4948 scope.go:117] "RemoveContainer" containerID="b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.932541 4948 scope.go:117] "RemoveContainer" containerID="62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586" Jan 20 20:36:51 crc kubenswrapper[4948]: E0120 20:36:51.933058 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586\": container with ID starting with 62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586 not found: ID does not exist" containerID="62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.933098 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586"} err="failed to get container status \"62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586\": rpc error: code = NotFound desc = could not find container \"62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586\": container with ID starting with 62e075bf73982db55793a6699895853fa04531eab6fd6641a572e66127159586 not found: ID does not exist" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.933124 4948 scope.go:117] "RemoveContainer" containerID="5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1" Jan 20 20:36:51 crc kubenswrapper[4948]: E0120 20:36:51.933442 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1\": container with ID starting with 5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1 not found: ID does not exist" containerID="5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.933467 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1"} err="failed to get container status \"5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1\": rpc error: code = NotFound desc = could not find container \"5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1\": container with ID starting with 5dd7e082cbc9e04c7184dfb3c831305d1894c1759709e0c2f1eee8998b1a2fa1 not found: ID does not exist" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.933491 4948 scope.go:117] "RemoveContainer" containerID="b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60" Jan 20 20:36:51 crc kubenswrapper[4948]: E0120 20:36:51.933878 4948 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60\": container with ID starting with b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60 not found: ID does not exist" containerID="b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60" Jan 20 20:36:51 crc kubenswrapper[4948]: I0120 20:36:51.933904 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60"} err="failed to get container status \"b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60\": rpc error: code = NotFound desc = could not find container \"b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60\": container with ID starting with b1ae7a634f9cb29e7f86b362e97e7958b39c64a397898a83f47193505659bf60 not found: ID does not exist" Jan 20 20:36:52 crc kubenswrapper[4948]: I0120 20:36:52.585358 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" path="/var/lib/kubelet/pods/e64bcc16-fd71-42a1-a94d-95f99d6c5d21/volumes" Jan 20 20:37:20 crc kubenswrapper[4948]: I0120 20:37:20.249949 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:37:20 crc kubenswrapper[4948]: I0120 20:37:20.250434 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.829546 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ls5mb"] Jan 20 20:37:29 crc kubenswrapper[4948]: E0120 20:37:29.830684 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerName="extract-utilities" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.830762 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerName="extract-utilities" Jan 20 20:37:29 crc kubenswrapper[4948]: E0120 20:37:29.830817 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerName="registry-server" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.830826 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerName="registry-server" Jan 20 20:37:29 crc kubenswrapper[4948]: E0120 20:37:29.830848 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerName="extract-content" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.830855 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerName="extract-content" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.831157 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="e64bcc16-fd71-42a1-a94d-95f99d6c5d21" containerName="registry-server" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 
20:37:29.836447 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.846649 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ls5mb"] Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.889899 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-catalog-content\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.889975 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kcvs\" (UniqueName: \"kubernetes.io/projected/10e84498-0973-46a1-8ac2-c100d3cc97f6-kube-api-access-7kcvs\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.890109 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-utilities\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.992282 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-utilities\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.992384 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-catalog-content\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.992450 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kcvs\" (UniqueName: \"kubernetes.io/projected/10e84498-0973-46a1-8ac2-c100d3cc97f6-kube-api-access-7kcvs\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.992948 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-utilities\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:29 crc kubenswrapper[4948]: I0120 20:37:29.992966 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-catalog-content\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:30 crc 
kubenswrapper[4948]: I0120 20:37:30.011496 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kcvs\" (UniqueName: \"kubernetes.io/projected/10e84498-0973-46a1-8ac2-c100d3cc97f6-kube-api-access-7kcvs\") pod \"community-operators-ls5mb\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:30 crc kubenswrapper[4948]: I0120 20:37:30.158937 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:31 crc kubenswrapper[4948]: I0120 20:37:31.208722 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ls5mb"] Jan 20 20:37:31 crc kubenswrapper[4948]: I0120 20:37:31.301545 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ls5mb" event={"ID":"10e84498-0973-46a1-8ac2-c100d3cc97f6","Type":"ContainerStarted","Data":"714e10701f2f83f0862c46771533b64a2741bdd4c4370da3e3cd4900f905cb4e"} Jan 20 20:37:32 crc kubenswrapper[4948]: I0120 20:37:32.312017 4948 generic.go:334] "Generic (PLEG): container finished" podID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerID="c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab" exitCode=0 Jan 20 20:37:32 crc kubenswrapper[4948]: I0120 20:37:32.312151 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ls5mb" event={"ID":"10e84498-0973-46a1-8ac2-c100d3cc97f6","Type":"ContainerDied","Data":"c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab"} Jan 20 20:37:34 crc kubenswrapper[4948]: I0120 20:37:34.330569 4948 generic.go:334] "Generic (PLEG): container finished" podID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerID="696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766" exitCode=0 Jan 20 20:37:34 crc kubenswrapper[4948]: I0120 20:37:34.330674 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ls5mb" event={"ID":"10e84498-0973-46a1-8ac2-c100d3cc97f6","Type":"ContainerDied","Data":"696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766"} Jan 20 20:37:35 crc kubenswrapper[4948]: I0120 20:37:35.348094 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ls5mb" event={"ID":"10e84498-0973-46a1-8ac2-c100d3cc97f6","Type":"ContainerStarted","Data":"8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865"} Jan 20 20:37:40 crc kubenswrapper[4948]: I0120 20:37:40.160043 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:40 crc kubenswrapper[4948]: I0120 20:37:40.161479 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:40 crc kubenswrapper[4948]: I0120 20:37:40.217738 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:40 crc kubenswrapper[4948]: I0120 20:37:40.243894 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ls5mb" podStartSLOduration=8.765798836 podStartE2EDuration="11.243869679s" podCreationTimestamp="2026-01-20 20:37:29 +0000 UTC" firstStartedPulling="2026-01-20 20:37:32.313863471 +0000 UTC m=+2880.264588430" lastFinishedPulling="2026-01-20 
20:37:34.791934294 +0000 UTC m=+2882.742659273" observedRunningTime="2026-01-20 20:37:35.372301628 +0000 UTC m=+2883.323026597" watchObservedRunningTime="2026-01-20 20:37:40.243869679 +0000 UTC m=+2888.194594658" Jan 20 20:37:40 crc kubenswrapper[4948]: I0120 20:37:40.441893 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:40 crc kubenswrapper[4948]: I0120 20:37:40.492273 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ls5mb"] Jan 20 20:37:42 crc kubenswrapper[4948]: I0120 20:37:42.413664 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ls5mb" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerName="registry-server" containerID="cri-o://8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865" gracePeriod=2 Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.012723 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.116015 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-utilities\") pod \"10e84498-0973-46a1-8ac2-c100d3cc97f6\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.116167 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7kcvs\" (UniqueName: \"kubernetes.io/projected/10e84498-0973-46a1-8ac2-c100d3cc97f6-kube-api-access-7kcvs\") pod \"10e84498-0973-46a1-8ac2-c100d3cc97f6\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.116249 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-catalog-content\") pod \"10e84498-0973-46a1-8ac2-c100d3cc97f6\" (UID: \"10e84498-0973-46a1-8ac2-c100d3cc97f6\") " Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.117074 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-utilities" (OuterVolumeSpecName: "utilities") pod "10e84498-0973-46a1-8ac2-c100d3cc97f6" (UID: "10e84498-0973-46a1-8ac2-c100d3cc97f6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.126546 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10e84498-0973-46a1-8ac2-c100d3cc97f6-kube-api-access-7kcvs" (OuterVolumeSpecName: "kube-api-access-7kcvs") pod "10e84498-0973-46a1-8ac2-c100d3cc97f6" (UID: "10e84498-0973-46a1-8ac2-c100d3cc97f6"). InnerVolumeSpecName "kube-api-access-7kcvs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.181461 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "10e84498-0973-46a1-8ac2-c100d3cc97f6" (UID: "10e84498-0973-46a1-8ac2-c100d3cc97f6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.218744 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.219157 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7kcvs\" (UniqueName: \"kubernetes.io/projected/10e84498-0973-46a1-8ac2-c100d3cc97f6-kube-api-access-7kcvs\") on node \"crc\" DevicePath \"\"" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.219309 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10e84498-0973-46a1-8ac2-c100d3cc97f6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.424243 4948 generic.go:334] "Generic (PLEG): container finished" podID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerID="8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865" exitCode=0 Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.424297 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ls5mb" event={"ID":"10e84498-0973-46a1-8ac2-c100d3cc97f6","Type":"ContainerDied","Data":"8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865"} Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.424336 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ls5mb" event={"ID":"10e84498-0973-46a1-8ac2-c100d3cc97f6","Type":"ContainerDied","Data":"714e10701f2f83f0862c46771533b64a2741bdd4c4370da3e3cd4900f905cb4e"} Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.424332 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ls5mb" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.424354 4948 scope.go:117] "RemoveContainer" containerID="8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.443841 4948 scope.go:117] "RemoveContainer" containerID="696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.468556 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ls5mb"] Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.481850 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ls5mb"] Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.482659 4948 scope.go:117] "RemoveContainer" containerID="c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.512164 4948 scope.go:117] "RemoveContainer" containerID="8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865" Jan 20 20:37:43 crc kubenswrapper[4948]: E0120 20:37:43.513543 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865\": container with ID starting with 8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865 not found: ID does not exist" containerID="8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.513586 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865"} err="failed to get container status \"8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865\": rpc error: code = NotFound desc = could not find container \"8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865\": container with ID starting with 8496c6f5d5541ce4ceb77820edccfb99f874ff76dc67bd3d3a1adc3b1da56865 not found: ID does not exist" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.513637 4948 scope.go:117] "RemoveContainer" containerID="696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766" Jan 20 20:37:43 crc kubenswrapper[4948]: E0120 20:37:43.514065 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766\": container with ID starting with 696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766 not found: ID does not exist" containerID="696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.514088 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766"} err="failed to get container status \"696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766\": rpc error: code = NotFound desc = could not find container \"696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766\": container with ID starting with 696e465d3bb2920876829b723d4b492e307b494b5441f1bfd5965ff1cd3bc766 not found: ID does not exist" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.514102 4948 scope.go:117] "RemoveContainer" 
containerID="c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab" Jan 20 20:37:43 crc kubenswrapper[4948]: E0120 20:37:43.514351 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab\": container with ID starting with c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab not found: ID does not exist" containerID="c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab" Jan 20 20:37:43 crc kubenswrapper[4948]: I0120 20:37:43.514370 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab"} err="failed to get container status \"c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab\": rpc error: code = NotFound desc = could not find container \"c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab\": container with ID starting with c1f2955686d238bea30341a7ff335c5571ad755d3b236992153ab6a2953341ab not found: ID does not exist" Jan 20 20:37:44 crc kubenswrapper[4948]: I0120 20:37:44.602911 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" path="/var/lib/kubelet/pods/10e84498-0973-46a1-8ac2-c100d3cc97f6/volumes" Jan 20 20:37:50 crc kubenswrapper[4948]: I0120 20:37:50.249654 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:37:50 crc kubenswrapper[4948]: I0120 20:37:50.250242 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:38:03 crc kubenswrapper[4948]: I0120 20:38:03.591697 4948 generic.go:334] "Generic (PLEG): container finished" podID="84db0de1-b0d6-4a7f-88d8-6470a493ef78" containerID="4db02a5315b05e2428ad2343db2882c6c6dd8cbb2d71bb457537c6348090fccf" exitCode=0 Jan 20 20:38:03 crc kubenswrapper[4948]: I0120 20:38:03.591743 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"84db0de1-b0d6-4a7f-88d8-6470a493ef78","Type":"ContainerDied","Data":"4db02a5315b05e2428ad2343db2882c6c6dd8cbb2d71bb457537c6348090fccf"} Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.099340 4948 util.go:48] "No ready sandbox for pod can be found. 
Jan 20 20:37:44 crc kubenswrapper[4948]: I0120 20:37:44.602911 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" path="/var/lib/kubelet/pods/10e84498-0973-46a1-8ac2-c100d3cc97f6/volumes"
Jan 20 20:37:50 crc kubenswrapper[4948]: I0120 20:37:50.249654 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 20:37:50 crc kubenswrapper[4948]: I0120 20:37:50.250242 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 20:38:03 crc kubenswrapper[4948]: I0120 20:38:03.591697 4948 generic.go:334] "Generic (PLEG): container finished" podID="84db0de1-b0d6-4a7f-88d8-6470a493ef78" containerID="4db02a5315b05e2428ad2343db2882c6c6dd8cbb2d71bb457537c6348090fccf" exitCode=0
Jan 20 20:38:03 crc kubenswrapper[4948]: I0120 20:38:03.591743 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"84db0de1-b0d6-4a7f-88d8-6470a493ef78","Type":"ContainerDied","Data":"4db02a5315b05e2428ad2343db2882c6c6dd8cbb2d71bb457537c6348090fccf"}
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.099340 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186287 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186384 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggmkm\" (UniqueName: \"kubernetes.io/projected/84db0de1-b0d6-4a7f-88d8-6470a493ef78-kube-api-access-ggmkm\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186441 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186459 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-config-data\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186520 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-temporary\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186580 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config-secret\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186610 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-workdir\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186666 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ssh-key\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.186725 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ca-certs\") pod \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\" (UID: \"84db0de1-b0d6-4a7f-88d8-6470a493ef78\") "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.187275 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-config-data" (OuterVolumeSpecName: "config-data") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.187471 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.191851 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.193578 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "test-operator-logs") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.194557 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84db0de1-b0d6-4a7f-88d8-6470a493ef78-kube-api-access-ggmkm" (OuterVolumeSpecName: "kube-api-access-ggmkm") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "kube-api-access-ggmkm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.221194 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.222720 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.244427 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.245310 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "84db0de1-b0d6-4a7f-88d8-6470a493ef78" (UID: "84db0de1-b0d6-4a7f-88d8-6470a493ef78"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.289267 4948 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.289303 4948 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.289313 4948 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ssh-key\") on node \"crc\" DevicePath \"\""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.289324 4948 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/84db0de1-b0d6-4a7f-88d8-6470a493ef78-ca-certs\") on node \"crc\" DevicePath \"\""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.289333 4948 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-openstack-config\") on node \"crc\" DevicePath \"\""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.289343 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggmkm\" (UniqueName: \"kubernetes.io/projected/84db0de1-b0d6-4a7f-88d8-6470a493ef78-kube-api-access-ggmkm\") on node \"crc\" DevicePath \"\""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.290475 4948 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" "
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.290497 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/84db0de1-b0d6-4a7f-88d8-6470a493ef78-config-data\") on node \"crc\" DevicePath \"\""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.290508 4948 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/84db0de1-b0d6-4a7f-88d8-6470a493ef78-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.317557 4948 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc"
Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.392700 4948 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\""
(PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"84db0de1-b0d6-4a7f-88d8-6470a493ef78","Type":"ContainerDied","Data":"745e1a6e3ae403d89258638a518025b2d805c20469f991c1a4cd1df71d28c300"} Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.610490 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="745e1a6e3ae403d89258638a518025b2d805c20469f991c1a4cd1df71d28c300" Jan 20 20:38:05 crc kubenswrapper[4948]: I0120 20:38:05.610239 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.796299 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 20 20:38:10 crc kubenswrapper[4948]: E0120 20:38:10.797354 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerName="extract-content" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.797372 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerName="extract-content" Jan 20 20:38:10 crc kubenswrapper[4948]: E0120 20:38:10.797408 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerName="registry-server" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.797415 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerName="registry-server" Jan 20 20:38:10 crc kubenswrapper[4948]: E0120 20:38:10.797432 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerName="extract-utilities" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.797438 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerName="extract-utilities" Jan 20 20:38:10 crc kubenswrapper[4948]: E0120 20:38:10.797448 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84db0de1-b0d6-4a7f-88d8-6470a493ef78" containerName="tempest-tests-tempest-tests-runner" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.797454 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="84db0de1-b0d6-4a7f-88d8-6470a493ef78" containerName="tempest-tests-tempest-tests-runner" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.797633 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="84db0de1-b0d6-4a7f-88d8-6470a493ef78" containerName="tempest-tests-tempest-tests-runner" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.797648 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="10e84498-0973-46a1-8ac2-c100d3cc97f6" containerName="registry-server" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.798307 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.805394 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-skvjj" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.810305 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.900561 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptdfq\" (UniqueName: \"kubernetes.io/projected/5db0e8eb-349c-41d5-96d3-9025f96d2869-kube-api-access-ptdfq\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5db0e8eb-349c-41d5-96d3-9025f96d2869\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 20 20:38:10 crc kubenswrapper[4948]: I0120 20:38:10.900777 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5db0e8eb-349c-41d5-96d3-9025f96d2869\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 20 20:38:11 crc kubenswrapper[4948]: I0120 20:38:11.002211 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5db0e8eb-349c-41d5-96d3-9025f96d2869\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 20 20:38:11 crc kubenswrapper[4948]: I0120 20:38:11.002606 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdfq\" (UniqueName: \"kubernetes.io/projected/5db0e8eb-349c-41d5-96d3-9025f96d2869-kube-api-access-ptdfq\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5db0e8eb-349c-41d5-96d3-9025f96d2869\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 20 20:38:11 crc kubenswrapper[4948]: I0120 20:38:11.002841 4948 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5db0e8eb-349c-41d5-96d3-9025f96d2869\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 20 20:38:11 crc kubenswrapper[4948]: I0120 20:38:11.036164 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptdfq\" (UniqueName: \"kubernetes.io/projected/5db0e8eb-349c-41d5-96d3-9025f96d2869-kube-api-access-ptdfq\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5db0e8eb-349c-41d5-96d3-9025f96d2869\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 20 20:38:11 crc kubenswrapper[4948]: I0120 20:38:11.048017 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5db0e8eb-349c-41d5-96d3-9025f96d2869\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 20 20:38:11 crc 
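The local-volume entries above show the two-phase mount: MountVolume.MountDevice stages the volume at its global path /mnt/openstack/pv08, and MountVolume.SetUp then makes it available inside the pod. A sketch of what the PersistentVolume behind local-storage08-crc plausibly looks like, expressed with the corev1 Go types; only the path and the node name "crc" come from the log, while capacity and access mode are assumptions:

    package kubeletnotes

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Assumed shape of the local PV named in the log; local PVs require node
    // affinity, which is why this volume only ever mounts on node "crc".
    var localStorage08 = corev1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{Name: "local-storage08-crc"},
        Spec: corev1.PersistentVolumeSpec{
            Capacity: corev1.ResourceList{
                corev1.ResourceStorage: resource.MustParse("10Gi"), // assumption
            },
            AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
            PersistentVolumeSource: corev1.PersistentVolumeSource{
                Local: &corev1.LocalVolumeSource{Path: "/mnt/openstack/pv08"},
            },
            NodeAffinity: &corev1.VolumeNodeAffinity{
                Required: &corev1.NodeSelector{
                    NodeSelectorTerms: []corev1.NodeSelectorTerm{{
                        MatchExpressions: []corev1.NodeSelectorRequirement{{
                            Key:      "kubernetes.io/hostname",
                            Operator: corev1.NodeSelectorOpIn,
                            Values:   []string{"crc"},
                        }},
                    }},
                },
            },
        },
    }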
Jan 20 20:38:11 crc kubenswrapper[4948]: I0120 20:38:11.117478 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Jan 20 20:38:11 crc kubenswrapper[4948]: I0120 20:38:11.641339 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 20 20:38:11 crc kubenswrapper[4948]: I0120 20:38:11.667115 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"5db0e8eb-349c-41d5-96d3-9025f96d2869","Type":"ContainerStarted","Data":"1e6f3bfb91bae3b6312be72e97ad068c76990777bd375cb10e71cf50f941b000"}
Jan 20 20:38:13 crc kubenswrapper[4948]: I0120 20:38:13.684858 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"5db0e8eb-349c-41d5-96d3-9025f96d2869","Type":"ContainerStarted","Data":"9ad75cee9f3447494962c6cb7b15c9097c2c2f7d9e59b925fa07b697b4f467cd"}
Jan 20 20:38:13 crc kubenswrapper[4948]: I0120 20:38:13.712759 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.849366218 podStartE2EDuration="3.712740335s" podCreationTimestamp="2026-01-20 20:38:10 +0000 UTC" firstStartedPulling="2026-01-20 20:38:11.659676653 +0000 UTC m=+2919.610401622" lastFinishedPulling="2026-01-20 20:38:12.52305077 +0000 UTC m=+2920.473775739" observedRunningTime="2026-01-20 20:38:13.705548272 +0000 UTC m=+2921.656273231" watchObservedRunningTime="2026-01-20 20:38:13.712740335 +0000 UTC m=+2921.663465304"
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.249816 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.250381 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.250435 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv"
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.251232 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.251290 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" gracePeriod=600
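The probe output above gives the exact endpoint the kubelet is hitting: GET http://127.0.0.1:8798/health. A sketch of the equivalent corev1.Probe; the period and threshold are assumptions rather than values read from the pod spec, though the failures in this log land roughly 30 s apart, which is consistent with the period used here:

    package kubeletnotes

    import (
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    // Reconstructed from the probe failure output above; only host, path, and
    // port come from the log.
    var mcdLiveness = corev1.Probe{
        ProbeHandler: corev1.ProbeHandler{
            HTTPGet: &corev1.HTTPGetAction{
                Host: "127.0.0.1",
                Path: "/health",
                Port: intstr.FromInt(8798),
            },
        },
        PeriodSeconds:    30, // assumption
        FailureThreshold: 3,  // assumption
    }

A "connection refused" here means nothing is listening on the port at all, which is why the kubelet moves straight to killing the container (gracePeriod=600) rather than waiting out further failures.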
Jan 20 20:38:20 crc kubenswrapper[4948]: E0120 20:38:20.372531 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.743223 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" exitCode=0
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.743276 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f"}
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.743323 4948 scope.go:117] "RemoveContainer" containerID="934acfbdee878cbe138279fabb4eca853e3510e2798842469d941a73da9705e1"
Jan 20 20:38:20 crc kubenswrapper[4948]: I0120 20:38:20.743974 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f"
Jan 20 20:38:20 crc kubenswrapper[4948]: E0120 20:38:20.744288 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:38:32 crc kubenswrapper[4948]: I0120 20:38:32.577055 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f"
Jan 20 20:38:32 crc kubenswrapper[4948]: E0120 20:38:32.577982 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1"
Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.359299 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-7qrk8/must-gather-64jzl"]
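The "back-off 5m0s" in the CrashLoopBackOff messages above is the kubelet's restart backoff sitting at its cap: by default the delay doubles on every crash, starting at 10 s and capping at 5 m, and it only resets after the container runs cleanly for a while. A sketch of that doubling, assuming the kubelet defaults:

    package kubeletnotes

    import "time"

    // restartBackoff models the documented kubelet behavior behind the
    // "back-off 5m0s restarting failed container" messages above.
    func restartBackoff(restarts int) time.Duration {
        const (
            initial = 10 * time.Second
            cap     = 5 * time.Minute
        )
        d := initial
        for i := 0; i < restarts && d < cap; i++ {
            d *= 2 // 10s, 20s, 40s, 80s, 160s, 320s...
        }
        if d > cap {
            d = cap
        }
        return d
    }

After five or more restarts the delay saturates at 5 m, which matches the roughly steady cadence of the "Error syncing pod, skipping" entries that recur through the rest of this log.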
Need to start a new one" pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.363639 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-7qrk8"/"default-dockercfg-9cdj5" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.364744 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-7qrk8"/"openshift-service-ca.crt" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.365031 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-7qrk8"/"kube-root-ca.crt" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.407904 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-7qrk8/must-gather-64jzl"] Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.415774 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq8kj\" (UniqueName: \"kubernetes.io/projected/337d06be-7739-418e-a1ec-9c1e0936cf6b-kube-api-access-bq8kj\") pod \"must-gather-64jzl\" (UID: \"337d06be-7739-418e-a1ec-9c1e0936cf6b\") " pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.415950 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/337d06be-7739-418e-a1ec-9c1e0936cf6b-must-gather-output\") pod \"must-gather-64jzl\" (UID: \"337d06be-7739-418e-a1ec-9c1e0936cf6b\") " pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.518144 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/337d06be-7739-418e-a1ec-9c1e0936cf6b-must-gather-output\") pod \"must-gather-64jzl\" (UID: \"337d06be-7739-418e-a1ec-9c1e0936cf6b\") " pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.518303 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bq8kj\" (UniqueName: \"kubernetes.io/projected/337d06be-7739-418e-a1ec-9c1e0936cf6b-kube-api-access-bq8kj\") pod \"must-gather-64jzl\" (UID: \"337d06be-7739-418e-a1ec-9c1e0936cf6b\") " pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.518691 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/337d06be-7739-418e-a1ec-9c1e0936cf6b-must-gather-output\") pod \"must-gather-64jzl\" (UID: \"337d06be-7739-418e-a1ec-9c1e0936cf6b\") " pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.539462 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq8kj\" (UniqueName: \"kubernetes.io/projected/337d06be-7739-418e-a1ec-9c1e0936cf6b-kube-api-access-bq8kj\") pod \"must-gather-64jzl\" (UID: \"337d06be-7739-418e-a1ec-9c1e0936cf6b\") " pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:38:36 crc kubenswrapper[4948]: I0120 20:38:36.686287 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:38:37 crc kubenswrapper[4948]: I0120 20:38:37.024890 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-7qrk8/must-gather-64jzl"] Jan 20 20:38:37 crc kubenswrapper[4948]: I0120 20:38:37.923194 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/must-gather-64jzl" event={"ID":"337d06be-7739-418e-a1ec-9c1e0936cf6b","Type":"ContainerStarted","Data":"e1a6089a997061f9f46a31d44d53e10aabc4ebbc04dd77764aec88b3c48d1aeb"} Jan 20 20:38:44 crc kubenswrapper[4948]: I0120 20:38:44.570004 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:38:44 crc kubenswrapper[4948]: E0120 20:38:44.571484 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:38:45 crc kubenswrapper[4948]: I0120 20:38:45.006230 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/must-gather-64jzl" event={"ID":"337d06be-7739-418e-a1ec-9c1e0936cf6b","Type":"ContainerStarted","Data":"8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a"} Jan 20 20:38:45 crc kubenswrapper[4948]: I0120 20:38:45.006289 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/must-gather-64jzl" event={"ID":"337d06be-7739-418e-a1ec-9c1e0936cf6b","Type":"ContainerStarted","Data":"52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2"} Jan 20 20:38:45 crc kubenswrapper[4948]: I0120 20:38:45.029928 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-7qrk8/must-gather-64jzl" podStartSLOduration=2.010626349 podStartE2EDuration="9.029899484s" podCreationTimestamp="2026-01-20 20:38:36 +0000 UTC" firstStartedPulling="2026-01-20 20:38:37.013597021 +0000 UTC m=+2944.964321990" lastFinishedPulling="2026-01-20 20:38:44.032870156 +0000 UTC m=+2951.983595125" observedRunningTime="2026-01-20 20:38:45.026051916 +0000 UTC m=+2952.976776905" watchObservedRunningTime="2026-01-20 20:38:45.029899484 +0000 UTC m=+2952.980624463" Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.428827 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-7qrk8/crc-debug-lzwwn"] Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.430140 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.508228 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f21fee-2a4f-405d-b35b-d63530d51409-host\") pod \"crc-debug-lzwwn\" (UID: \"b8f21fee-2a4f-405d-b35b-d63530d51409\") " pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.508647 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2jx7\" (UniqueName: \"kubernetes.io/projected/b8f21fee-2a4f-405d-b35b-d63530d51409-kube-api-access-j2jx7\") pod \"crc-debug-lzwwn\" (UID: \"b8f21fee-2a4f-405d-b35b-d63530d51409\") " pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.610664 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f21fee-2a4f-405d-b35b-d63530d51409-host\") pod \"crc-debug-lzwwn\" (UID: \"b8f21fee-2a4f-405d-b35b-d63530d51409\") " pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.611069 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2jx7\" (UniqueName: \"kubernetes.io/projected/b8f21fee-2a4f-405d-b35b-d63530d51409-kube-api-access-j2jx7\") pod \"crc-debug-lzwwn\" (UID: \"b8f21fee-2a4f-405d-b35b-d63530d51409\") " pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.611337 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f21fee-2a4f-405d-b35b-d63530d51409-host\") pod \"crc-debug-lzwwn\" (UID: \"b8f21fee-2a4f-405d-b35b-d63530d51409\") " pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.655848 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2jx7\" (UniqueName: \"kubernetes.io/projected/b8f21fee-2a4f-405d-b35b-d63530d51409-kube-api-access-j2jx7\") pod \"crc-debug-lzwwn\" (UID: \"b8f21fee-2a4f-405d-b35b-d63530d51409\") " pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:38:49 crc kubenswrapper[4948]: I0120 20:38:49.751001 4948 util.go:30] "No sandbox for pod can be found. 
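The crc-debug pod above mounts the node through a hostPath volume named "host"; the container spec dump further down in this log confirms it lands at /host inside the container. Expressed as corev1 types, with the hostPath source of "/" assumed from the usual oc debug node/ convention rather than read from the log:

    package kubeletnotes

    import corev1 "k8s.io/api/core/v1"

    // The debug pod's node mount: volume name "host" comes from the
    // reconciler entries above, MountPath /host from the spec dump below.
    var (
        hostVolume = corev1.Volume{
            Name: "host",
            VolumeSource: corev1.VolumeSource{
                HostPath: &corev1.HostPathVolumeSource{Path: "/"}, // assumption
            },
        }
        hostMount = corev1.VolumeMount{Name: "host", MountPath: "/host"}
    )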
Need to start a new one" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:38:50 crc kubenswrapper[4948]: I0120 20:38:50.058229 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" event={"ID":"b8f21fee-2a4f-405d-b35b-d63530d51409","Type":"ContainerStarted","Data":"88f0f104346f558ae8a093c4d6ea2a237d89016c736fc26dc500bfc4a8e261cb"} Jan 20 20:38:52 crc kubenswrapper[4948]: I0120 20:38:52.856625 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-869694d5d6-n6ftn_7eca20c7-5485-4fce-9c6e-d3bd3943adc1/barbican-api-log/0.log" Jan 20 20:38:52 crc kubenswrapper[4948]: I0120 20:38:52.875051 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-869694d5d6-n6ftn_7eca20c7-5485-4fce-9c6e-d3bd3943adc1/barbican-api/0.log" Jan 20 20:38:52 crc kubenswrapper[4948]: I0120 20:38:52.971768 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-88477f558-k4bcx_e71b28b0-54d9-48ce-9442-412fbdd5fe0f/barbican-keystone-listener-log/0.log" Jan 20 20:38:52 crc kubenswrapper[4948]: I0120 20:38:52.982013 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-88477f558-k4bcx_e71b28b0-54d9-48ce-9442-412fbdd5fe0f/barbican-keystone-listener/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.005629 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6d76c4759-rj9ns_9b73cf57-92bd-47c5-8f21-ffcc9438594b/barbican-worker-log/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.016046 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6d76c4759-rj9ns_9b73cf57-92bd-47c5-8f21-ffcc9438594b/barbican-worker/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.079869 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn_11f8f855-5031-4c77-88c5-07f606419c1f/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.109010 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ad8829d7-3d58-4752-9f62-83663e2dad23/ceilometer-central-agent/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.140612 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ad8829d7-3d58-4752-9f62-83663e2dad23/ceilometer-notification-agent/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.145944 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ad8829d7-3d58-4752-9f62-83663e2dad23/sg-core/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.153903 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ad8829d7-3d58-4752-9f62-83663e2dad23/proxy-httpd/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.168449 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bf15b74a-2849-4970-87a3-83d7e1b788ba/cinder-api-log/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.213547 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bf15b74a-2849-4970-87a3-83d7e1b788ba/cinder-api/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.261092 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cinder-scheduler-0_e95290f6-0498-4bfa-8653-3a53edf4f01f/cinder-scheduler/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.298488 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e95290f6-0498-4bfa-8653-3a53edf4f01f/probe/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.331630 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-52fgv_88dba5f2-ff1f-420f-a1cf-e78fd5512d44/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.365825 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-2446g_c43c5ed8-ee74-481a-9b89-30845f8380b8/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.424831 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-f4d4c4b7-5pcpw_fb7020ef-1f09-4241-9001-eb628c16fd07/dnsmasq-dns/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.435876 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-f4d4c4b7-5pcpw_fb7020ef-1f09-4241-9001-eb628c16fd07/init/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.470158 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-x77kc_bdfde737-ff95-41e6-a124-accfa3f24d58/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.483419 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf/glance-log/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.507644 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf/glance-httpd/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.527220 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2f39439c-442b-407e-9b64-ed1a23e6a97c/glance-log/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.549616 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2f39439c-442b-407e-9b64-ed1a23e6a97c/glance-httpd/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.787596 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-67dd67cb9b-9w4wk_4d2c0905-915e-4504-8454-ee3500220ab3/horizon-log/0.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.942955 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-67dd67cb9b-9w4wk_4d2c0905-915e-4504-8454-ee3500220ab3/horizon/1.log" Jan 20 20:38:53 crc kubenswrapper[4948]: I0120 20:38:53.947015 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-67dd67cb9b-9w4wk_4d2c0905-915e-4504-8454-ee3500220ab3/horizon/2.log" Jan 20 20:38:54 crc kubenswrapper[4948]: I0120 20:38:54.025070 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq_cf7abc7a-4446-4807-af6e-96711d710f9e/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:38:54 crc kubenswrapper[4948]: I0120 20:38:54.066451 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-gbbgp_a036dc78-f9f1-467a-b272-a45b9280bc99/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:38:54 crc kubenswrapper[4948]: I0120 20:38:54.213482 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7c45b45594-rdsj9_413e45d6-d022-4586-82cc-228d8431dce4/keystone-api/0.log" Jan 20 20:38:54 crc kubenswrapper[4948]: I0120 20:38:54.224382 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f/kube-state-metrics/0.log" Jan 20 20:38:54 crc kubenswrapper[4948]: I0120 20:38:54.274055 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2_c6149a97-b5c3-4ec7-8b50-fc3a77843b48/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:38:56 crc kubenswrapper[4948]: I0120 20:38:56.577479 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:38:56 crc kubenswrapper[4948]: E0120 20:38:56.579034 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:39:06 crc kubenswrapper[4948]: E0120 20:39:06.478512 4948 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296" Jan 20 20:39:06 crc kubenswrapper[4948]: E0120 20:39:06.479444 4948 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-00,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296,Command:[chroot /host bash -c echo 'TOOLBOX_NAME=toolbox-osp' > /root/.toolboxrc ; rm -rf \"/var/tmp/sos-osp\" && mkdir -p \"/var/tmp/sos-osp\" && sudo podman rm --force toolbox-osp; sudo --preserve-env podman pull --authfile /var/lib/kubelet/config.json registry.redhat.io/rhel9/support-tools && toolbox sos report --batch --all-logs --only-plugins block,cifs,crio,devicemapper,devices,firewall_tables,firewalld,iscsi,lvm2,memory,multipath,nfs,nis,nvme,podman,process,processor,selinux,scsi,udev,logs,crypto --tmp-dir=\"/var/tmp/sos-osp\" && if [[ \"$(ls /var/log/pods/*/{*.log.*,*/*.log.*} 2>/dev/null)\" != '' ]]; then tar --ignore-failed-read --warning=no-file-changed -cJf \"/var/tmp/sos-osp/podlogs.tar.xz\" --transform 's,^,podlogs/,' /var/log/pods/*/{*.log.*,*/*.log.*} || true; 
fi],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:TMOUT,Value:900,ValueFrom:nil,},EnvVar{Name:HOST,Value:/host,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host,ReadOnly:false,MountPath:/host,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j2jx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod crc-debug-lzwwn_openshift-must-gather-7qrk8(b8f21fee-2a4f-405d-b35b-d63530d51409): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 20 20:39:06 crc kubenswrapper[4948]: E0120 20:39:06.480662 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-00\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" podUID="b8f21fee-2a4f-405d-b35b-d63530d51409" Jan 20 20:39:07 crc kubenswrapper[4948]: E0120 20:39:07.261419 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-00\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296\\\"\"" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" podUID="b8f21fee-2a4f-405d-b35b-d63530d51409" Jan 20 20:39:10 crc kubenswrapper[4948]: I0120 20:39:10.570678 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:39:10 crc kubenswrapper[4948]: E0120 20:39:10.571177 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:39:14 crc kubenswrapper[4948]: I0120 20:39:14.497861 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/controller/0.log" Jan 20 20:39:14 crc kubenswrapper[4948]: I0120 20:39:14.507108 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/kube-rbac-proxy/0.log" Jan 20 20:39:14 crc kubenswrapper[4948]: I0120 20:39:14.535910 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/controller/0.log" Jan 20 20:39:16 crc 
kubenswrapper[4948]: I0120 20:39:16.345789 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.364754 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/reloader/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.374183 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr-metrics/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.387234 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.412483 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy-frr/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.418320 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-frr-files/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.434915 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-reloader/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.447064 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-metrics/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.459473 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-mxgmc_06d4b8b1-3c5f-4736-9492-bc33db43f510/frr-k8s-webhook-server/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.502757 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7998c69bcc-rkwld_a422b9d2-2fe8-485a-a7c7-fb0fa96706c9/manager/0.log" Jan 20 20:39:16 crc kubenswrapper[4948]: I0120 20:39:16.516162 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-989f8776d-mst22_3eb6ce14-f5fb-4e93-8f16-d4b0eec67237/webhook-server/0.log" Jan 20 20:39:17 crc kubenswrapper[4948]: I0120 20:39:17.033065 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/speaker/0.log" Jan 20 20:39:17 crc kubenswrapper[4948]: I0120 20:39:17.042762 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/kube-rbac-proxy/0.log" Jan 20 20:39:19 crc kubenswrapper[4948]: I0120 20:39:19.405110 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" event={"ID":"b8f21fee-2a4f-405d-b35b-d63530d51409","Type":"ContainerStarted","Data":"0c000b1a036fd6ebeb2916ee86a24391f667c3dd6225f6b25ba7cdd186b46d49"} Jan 20 20:39:19 crc kubenswrapper[4948]: I0120 20:39:19.431349 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" podStartSLOduration=1.318318446 podStartE2EDuration="30.431324401s" podCreationTimestamp="2026-01-20 20:38:49 +0000 UTC" firstStartedPulling="2026-01-20 20:38:49.898352948 +0000 UTC m=+2957.849077917" 
lastFinishedPulling="2026-01-20 20:39:19.011358903 +0000 UTC m=+2986.962083872" observedRunningTime="2026-01-20 20:39:19.428904373 +0000 UTC m=+2987.379629342" watchObservedRunningTime="2026-01-20 20:39:19.431324401 +0000 UTC m=+2987.382049370" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.010438 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_d6257c47-078f-4d41-942c-45d7e57b8c15/memcached/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.045503 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-79d47bbd4f-rpj54_4005ab42-8a7a-4951-ba75-b1f7a3d2a063/neutron-api/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.062315 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-79d47bbd4f-rpj54_4005ab42-8a7a-4951-ba75-b1f7a3d2a063/neutron-httpd/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.086511 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2_a14c4acd-7573-4e72-9ab4-c1263844f59e/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.163286 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_0bef1366-a94a-4d51-a5b4-53fe9a86a4d9/nova-api-log/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.360489 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_0bef1366-a94a-4d51-a5b4-53fe9a86a4d9/nova-api-api/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.453421 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_8c56770f-e8ae-4540-9bb0-34123665502e/nova-cell0-conductor-conductor/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.533289 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_d3f5f7e6-247c-41c7-877c-f43cf1b1f412/nova-cell1-conductor-conductor/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.601781 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_8dc0455c-7835-456a-b537-34836da2cdff/nova-cell1-novncproxy-novncproxy/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.662301 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-x5v8p_4bb85740-d63d-4363-91af-c07eecf6ab45/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:22 crc kubenswrapper[4948]: I0120 20:39:22.727808 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_405260b6-bbf5-4d0b-8a81-686340252185/nova-metadata-log/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.365558 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_405260b6-bbf5-4d0b-8a81-686340252185/nova-metadata-metadata/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.477509 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_7d52d1e7-1dc7-4341-b483-da6863189804/nova-scheduler-scheduler/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.500418 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_68260cc0-7bcb-4582-8154-60bbcdfbcf04/galera/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.519716 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-cell1-galera-0_68260cc0-7bcb-4582-8154-60bbcdfbcf04/mysql-bootstrap/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.553987 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_67ccceb8-ab3c-4304-9336-8938675a1012/galera/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.580325 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_67ccceb8-ab3c-4304-9336-8938675a1012/mysql-bootstrap/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.592408 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_d1222f27-af2a-46fd-a296-37bdb8db4486/openstackclient/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.636009 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-hpg27_46328967-e69a-4d46-86d6-ba1af248c8f2/ovn-controller/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.653854 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-g8dbf_3bdd9991-773b-4709-a6e1-426c1fc89d23/openstack-network-exporter/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.698772 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dgkh9_7e8635e1-cc17-4a2e-9b45-b76043df05d4/ovsdb-server/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.726064 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dgkh9_7e8635e1-cc17-4a2e-9b45-b76043df05d4/ovs-vswitchd/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.764199 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dgkh9_7e8635e1-cc17-4a2e-9b45-b76043df05d4/ovsdb-server-init/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.936145 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-7tm27_ee6e6079-b341-4648-b640-da45d2f27ed5/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.951069 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8beae232-ff35-4a9c-9f68-0d9c20e65c67/ovn-northd/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.972058 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8beae232-ff35-4a9c-9f68-0d9c20e65c67/openstack-network-exporter/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.986717 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_db2122b2-3a50-4587-944d-ca8aa51882ab/ovsdbserver-nb/0.log" Jan 20 20:39:23 crc kubenswrapper[4948]: I0120 20:39:23.996883 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_db2122b2-3a50-4587-944d-ca8aa51882ab/openstack-network-exporter/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.027390 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_25b56954-2973-439d-a473-019d32e6ec0c/ovsdbserver-sb/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.039534 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_25b56954-2973-439d-a473-019d32e6ec0c/openstack-network-exporter/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.090320 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_placement-6965b8b8b4-5f4wt_923c67b1-e9b6-4c67-86aa-96dc2760ba19/placement-log/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.116980 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6965b8b8b4-5f4wt_923c67b1-e9b6-4c67-86aa-96dc2760ba19/placement-api/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.135612 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_899d2813-4685-40b7-ba95-60d3126802a2/rabbitmq/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.147376 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_899d2813-4685-40b7-ba95-60d3126802a2/setup-container/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.173993 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8c30b121-20f6-47ad-89e0-ce511df4efb7/rabbitmq/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.184154 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8c30b121-20f6-47ad-89e0-ce511df4efb7/setup-container/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.204038 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p_c2713e4e-89b8-4d59-9a34-947cd7af2e0e/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.220863 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-2bxbf_cd1a8ab5-15f0-4194-bb29-4bd56b856c33/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.243998 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-482zl_5a4fea5f-1b46-482d-a956-9307be45284c/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.256690 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-kgkms_1a69232e-a7d3-43f7-a730-b21ffbf62e38/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.273504 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-spfvx_fc3ad5c4-f353-42b4-8266-6180aae6f48f/ssh-known-hosts-edpm-deployment/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.385796 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-646f4c575-wzbtn_e0464310-34e8-4747-9a37-6a9ce764a73a/proxy-httpd/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.434677 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-646f4c575-wzbtn_e0464310-34e8-4747-9a37-6a9ce764a73a/proxy-server/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.446363 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-ctgvx_ce6ef66a-e0b9-4dbf-9c1b-262e952e9845/swift-ring-rebalance/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.484804 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/account-server/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.504544 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/account-replicator/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.508831 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/account-auditor/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.515465 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/account-reaper/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.550588 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/container-server/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.570776 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/container-replicator/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.583371 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/container-auditor/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.593268 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/container-updater/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.624348 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-server/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.646803 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-replicator/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.663000 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-auditor/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.671017 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-updater/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.686531 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-expirer/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.701833 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/rsync/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.711947 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/swift-recon-cron/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.782788 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-ht82b_28bbc15a-1085-4cbd-9dac-0180526816bc/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.811466 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_84db0de1-b0d6-4a7f-88d8-6470a493ef78/tempest-tests-tempest-tests-runner/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.819031 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_5db0e8eb-349c-41d5-96d3-9025f96d2869/test-operator-logs-container/0.log" Jan 20 20:39:24 crc kubenswrapper[4948]: I0120 20:39:24.849093 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg_ada055ea-6aa5-4e75-ad5b-4caec7647608/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:39:25 crc kubenswrapper[4948]: I0120 20:39:25.570924 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:39:25 crc kubenswrapper[4948]: E0120 20:39:25.571530 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:39:34 crc kubenswrapper[4948]: I0120 20:39:34.800481 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/extract/0.log" Jan 20 20:39:34 crc kubenswrapper[4948]: I0120 20:39:34.812869 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/util/0.log" Jan 20 20:39:34 crc kubenswrapper[4948]: I0120 20:39:34.827736 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/pull/0.log" Jan 20 20:39:34 crc kubenswrapper[4948]: I0120 20:39:34.889935 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-6vfzk_ef41048d-32d0-4b45-98ef-181e13e62c26/manager/0.log" Jan 20 20:39:34 crc kubenswrapper[4948]: I0120 20:39:34.943134 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-2k89b_d6a36d62-a638-45c5-956a-12cb6f1ced24/manager/0.log" Jan 20 20:39:34 crc kubenswrapper[4948]: I0120 20:39:34.960812 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-6mp4q_d507465c-a0e3-494e-9e20-ef8c3517e059/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.024071 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-x9hmd_b78116d1-a584-49fa-ab14-86f78ce62420/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.042850 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-m8f25_d8461566-61e6-495d-b1ad-c0178c2eb849/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.068656 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-b7j48_6f758308-6a33-4dc5-996e-beae970d4083/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.359627 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-xgc4z_09ceeac6-c058-41a8-a0d6-07b4bde73893/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.372784 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-6xdw4_233a0ffe-a99e-4268-93ed-a2a20cb2c7ab/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.473589 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-hkwvp_ed91900c-0efb-4184-8d92-d11fb7ae82b7/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.494243 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-snszj_38d63cbf-6bc2-4c48-9905-88c65334d42a/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.553560 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-7qmgq_61ba0da3-99a5-4b43-a2fb-190260ab8f3a/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.603025 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-5mlm4_61da457f-7595-4df3-8705-e34138ec590d/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.703069 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-phpvf_094e4268-74c4-40e5-8f39-b6090b284c27/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.714958 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-k9n27_d4f3075e-95f9-432a-bfcd-621b6cbe2615/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.729994 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl_40c9112e-c5f0-4cf7-8039-f50ff4640ba9/manager/0.log" Jan 20 20:39:35 crc kubenswrapper[4948]: I0120 20:39:35.861167 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5fcf846598-7x9nh_6d523c92-ebbc-4860-9bcc-45ef88372f2b/operator/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.470446 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c9b95f56c-kd6qw_0a88f765-46a8-4252-832c-ccf595a0f1d2/manager/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.482816 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-fckw5_e98fafb2-a9ef-4252-a236-be3c009d42b2/registry-server/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.542686 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-zpq74_ebd95a40-2e8d-481a-a842-b8fe125ebdb2/manager/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.572584 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-wnzkb_febd743e-d499-4cc9-9e66-29ac1b4ca89c/manager/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.597441 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-9m5nk_f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0/operator/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.619621 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-56544cf655-ngkkb_80950323-03e4-4aa3-ba31-06043e2a51b9/manager/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.675809 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-rsb9m_910fc292-11a6-47de-80e6-59cc027e972c/manager/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.691377 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-2bt9t_5a25aeaf-8323-46a9-8c2a-e000321478ee/manager/0.log" Jan 20 20:39:37 crc kubenswrapper[4948]: I0120 20:39:37.704321 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-52fnn_76b9cf9a-a325-4528-8f35-3d0b94060ef1/manager/0.log" Jan 20 20:39:38 crc kubenswrapper[4948]: I0120 20:39:38.583594 4948 generic.go:334] "Generic (PLEG): container finished" podID="b8f21fee-2a4f-405d-b35b-d63530d51409" containerID="0c000b1a036fd6ebeb2916ee86a24391f667c3dd6225f6b25ba7cdd186b46d49" exitCode=0 Jan 20 20:39:38 crc kubenswrapper[4948]: I0120 20:39:38.584077 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" event={"ID":"b8f21fee-2a4f-405d-b35b-d63530d51409","Type":"ContainerDied","Data":"0c000b1a036fd6ebeb2916ee86a24391f667c3dd6225f6b25ba7cdd186b46d49"} Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.570338 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:39:39 crc kubenswrapper[4948]: E0120 20:39:39.570549 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.728871 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.761865 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-7qrk8/crc-debug-lzwwn"] Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.773392 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-7qrk8/crc-debug-lzwwn"] Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.811859 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2jx7\" (UniqueName: \"kubernetes.io/projected/b8f21fee-2a4f-405d-b35b-d63530d51409-kube-api-access-j2jx7\") pod \"b8f21fee-2a4f-405d-b35b-d63530d51409\" (UID: \"b8f21fee-2a4f-405d-b35b-d63530d51409\") " Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.812005 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f21fee-2a4f-405d-b35b-d63530d51409-host\") pod \"b8f21fee-2a4f-405d-b35b-d63530d51409\" (UID: \"b8f21fee-2a4f-405d-b35b-d63530d51409\") " Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.812274 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b8f21fee-2a4f-405d-b35b-d63530d51409-host" (OuterVolumeSpecName: "host") pod "b8f21fee-2a4f-405d-b35b-d63530d51409" (UID: "b8f21fee-2a4f-405d-b35b-d63530d51409"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.812822 4948 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b8f21fee-2a4f-405d-b35b-d63530d51409-host\") on node \"crc\" DevicePath \"\"" Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.818092 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f21fee-2a4f-405d-b35b-d63530d51409-kube-api-access-j2jx7" (OuterVolumeSpecName: "kube-api-access-j2jx7") pod "b8f21fee-2a4f-405d-b35b-d63530d51409" (UID: "b8f21fee-2a4f-405d-b35b-d63530d51409"). InnerVolumeSpecName "kube-api-access-j2jx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:39:39 crc kubenswrapper[4948]: I0120 20:39:39.914811 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2jx7\" (UniqueName: \"kubernetes.io/projected/b8f21fee-2a4f-405d-b35b-d63530d51409-kube-api-access-j2jx7\") on node \"crc\" DevicePath \"\"" Jan 20 20:39:40 crc kubenswrapper[4948]: I0120 20:39:40.580854 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8f21fee-2a4f-405d-b35b-d63530d51409" path="/var/lib/kubelet/pods/b8f21fee-2a4f-405d-b35b-d63530d51409/volumes" Jan 20 20:39:40 crc kubenswrapper[4948]: I0120 20:39:40.614296 4948 scope.go:117] "RemoveContainer" containerID="0c000b1a036fd6ebeb2916ee86a24391f667c3dd6225f6b25ba7cdd186b46d49" Jan 20 20:39:40 crc kubenswrapper[4948]: I0120 20:39:40.614522 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-7qrk8/crc-debug-lzwwn" Jan 20 20:39:40 crc kubenswrapper[4948]: I0120 20:39:40.954674 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-7qrk8/crc-debug-f6qkz"] Jan 20 20:39:40 crc kubenswrapper[4948]: E0120 20:39:40.955539 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f21fee-2a4f-405d-b35b-d63530d51409" containerName="container-00" Jan 20 20:39:40 crc kubenswrapper[4948]: I0120 20:39:40.955557 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f21fee-2a4f-405d-b35b-d63530d51409" containerName="container-00" Jan 20 20:39:40 crc kubenswrapper[4948]: I0120 20:39:40.955826 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f21fee-2a4f-405d-b35b-d63530d51409" containerName="container-00" Jan 20 20:39:40 crc kubenswrapper[4948]: I0120 20:39:40.956595 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.040030 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c17ccf45-4ddb-4d08-8895-639861993599-host\") pod \"crc-debug-f6qkz\" (UID: \"c17ccf45-4ddb-4d08-8895-639861993599\") " pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.040549 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tnv6\" (UniqueName: \"kubernetes.io/projected/c17ccf45-4ddb-4d08-8895-639861993599-kube-api-access-4tnv6\") pod \"crc-debug-f6qkz\" (UID: \"c17ccf45-4ddb-4d08-8895-639861993599\") " pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.142825 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tnv6\" (UniqueName: \"kubernetes.io/projected/c17ccf45-4ddb-4d08-8895-639861993599-kube-api-access-4tnv6\") pod \"crc-debug-f6qkz\" (UID: \"c17ccf45-4ddb-4d08-8895-639861993599\") " pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.142926 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c17ccf45-4ddb-4d08-8895-639861993599-host\") pod \"crc-debug-f6qkz\" (UID: \"c17ccf45-4ddb-4d08-8895-639861993599\") " pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.143052 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c17ccf45-4ddb-4d08-8895-639861993599-host\") pod \"crc-debug-f6qkz\" (UID: \"c17ccf45-4ddb-4d08-8895-639861993599\") " pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.164256 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tnv6\" (UniqueName: \"kubernetes.io/projected/c17ccf45-4ddb-4d08-8895-639861993599-kube-api-access-4tnv6\") pod \"crc-debug-f6qkz\" (UID: \"c17ccf45-4ddb-4d08-8895-639861993599\") " pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.276037 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.627540 4948 generic.go:334] "Generic (PLEG): container finished" podID="c17ccf45-4ddb-4d08-8895-639861993599" containerID="a67731c87ec0e32c2e4100d2e38a70d28371789e6f31059d9db6081025f21a70" exitCode=1 Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.627881 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" event={"ID":"c17ccf45-4ddb-4d08-8895-639861993599","Type":"ContainerDied","Data":"a67731c87ec0e32c2e4100d2e38a70d28371789e6f31059d9db6081025f21a70"} Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.627915 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" event={"ID":"c17ccf45-4ddb-4d08-8895-639861993599","Type":"ContainerStarted","Data":"5cbe521ac4880954a449f5acb405e2681c210363aaf41fb60e750eee07b92a0f"} Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.666580 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-7qrk8/crc-debug-f6qkz"] Jan 20 20:39:41 crc kubenswrapper[4948]: I0120 20:39:41.678164 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-7qrk8/crc-debug-f6qkz"] Jan 20 20:39:42 crc kubenswrapper[4948]: I0120 20:39:42.763607 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:42 crc kubenswrapper[4948]: I0120 20:39:42.875294 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c17ccf45-4ddb-4d08-8895-639861993599-host\") pod \"c17ccf45-4ddb-4d08-8895-639861993599\" (UID: \"c17ccf45-4ddb-4d08-8895-639861993599\") " Jan 20 20:39:42 crc kubenswrapper[4948]: I0120 20:39:42.875493 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tnv6\" (UniqueName: \"kubernetes.io/projected/c17ccf45-4ddb-4d08-8895-639861993599-kube-api-access-4tnv6\") pod \"c17ccf45-4ddb-4d08-8895-639861993599\" (UID: \"c17ccf45-4ddb-4d08-8895-639861993599\") " Jan 20 20:39:42 crc kubenswrapper[4948]: I0120 20:39:42.875589 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c17ccf45-4ddb-4d08-8895-639861993599-host" (OuterVolumeSpecName: "host") pod "c17ccf45-4ddb-4d08-8895-639861993599" (UID: "c17ccf45-4ddb-4d08-8895-639861993599"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:39:42 crc kubenswrapper[4948]: I0120 20:39:42.876341 4948 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c17ccf45-4ddb-4d08-8895-639861993599-host\") on node \"crc\" DevicePath \"\"" Jan 20 20:39:42 crc kubenswrapper[4948]: I0120 20:39:42.880627 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c17ccf45-4ddb-4d08-8895-639861993599-kube-api-access-4tnv6" (OuterVolumeSpecName: "kube-api-access-4tnv6") pod "c17ccf45-4ddb-4d08-8895-639861993599" (UID: "c17ccf45-4ddb-4d08-8895-639861993599"). InnerVolumeSpecName "kube-api-access-4tnv6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:39:42 crc kubenswrapper[4948]: I0120 20:39:42.978077 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tnv6\" (UniqueName: \"kubernetes.io/projected/c17ccf45-4ddb-4d08-8895-639861993599-kube-api-access-4tnv6\") on node \"crc\" DevicePath \"\"" Jan 20 20:39:43 crc kubenswrapper[4948]: I0120 20:39:43.397558 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4pnmq_203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3/control-plane-machine-set-operator/0.log" Jan 20 20:39:43 crc kubenswrapper[4948]: I0120 20:39:43.416727 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-hxwlm_666e60ed-f213-4af4-a4a9-969864d1fd0e/kube-rbac-proxy/0.log" Jan 20 20:39:43 crc kubenswrapper[4948]: I0120 20:39:43.425861 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-hxwlm_666e60ed-f213-4af4-a4a9-969864d1fd0e/machine-api-operator/0.log" Jan 20 20:39:43 crc kubenswrapper[4948]: I0120 20:39:43.647853 4948 scope.go:117] "RemoveContainer" containerID="a67731c87ec0e32c2e4100d2e38a70d28371789e6f31059d9db6081025f21a70" Jan 20 20:39:43 crc kubenswrapper[4948]: I0120 20:39:43.647915 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7qrk8/crc-debug-f6qkz" Jan 20 20:39:44 crc kubenswrapper[4948]: I0120 20:39:44.580532 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c17ccf45-4ddb-4d08-8895-639861993599" path="/var/lib/kubelet/pods/c17ccf45-4ddb-4d08-8895-639861993599/volumes" Jan 20 20:39:49 crc kubenswrapper[4948]: I0120 20:39:49.194001 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-dt9ht_0a4be8e0-f8af-4f0d-8230-37fd71e2cc81/cert-manager-controller/0.log" Jan 20 20:39:49 crc kubenswrapper[4948]: I0120 20:39:49.217268 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-82hbd_1973fd2f-85c7-4fbb-92b0-0973744d9d00/cert-manager-cainjector/0.log" Jan 20 20:39:49 crc kubenswrapper[4948]: I0120 20:39:49.227474 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-fckz7_5474f4e5-fa0d-4931-b732-4a1d0e06c858/cert-manager-webhook/0.log" Jan 20 20:39:52 crc kubenswrapper[4948]: I0120 20:39:52.592638 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:39:52 crc kubenswrapper[4948]: E0120 20:39:52.593566 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:39:55 crc kubenswrapper[4948]: I0120 20:39:55.051500 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-czsd9_a0bd44ac-39a0-4aed-8a23-d12330d46924/nmstate-console-plugin/0.log" Jan 20 20:39:55 crc kubenswrapper[4948]: I0120 20:39:55.070843 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-handler-nqpgc_34b9a637-f29d-49ad-961c-d923e71907e1/nmstate-handler/0.log" Jan 20 20:39:55 crc kubenswrapper[4948]: I0120 20:39:55.085046 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jq57s_d7a43a4d-6505-4105-bfb8-c1239d0436e8/nmstate-metrics/0.log" Jan 20 20:39:55 crc kubenswrapper[4948]: I0120 20:39:55.100620 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jq57s_d7a43a4d-6505-4105-bfb8-c1239d0436e8/kube-rbac-proxy/0.log" Jan 20 20:39:55 crc kubenswrapper[4948]: I0120 20:39:55.117894 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-9ldq2_d72955e0-ce7e-4d8f-be8a-b22eee46ec69/nmstate-operator/0.log" Jan 20 20:39:55 crc kubenswrapper[4948]: I0120 20:39:55.129721 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-6lt8c_b4431242-1662-43bd-bbfc-192d87f5393b/nmstate-webhook/0.log" Jan 20 20:40:06 crc kubenswrapper[4948]: I0120 20:40:06.665036 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/controller/0.log" Jan 20 20:40:06 crc kubenswrapper[4948]: I0120 20:40:06.671544 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/kube-rbac-proxy/0.log" Jan 20 20:40:06 crc kubenswrapper[4948]: I0120 20:40:06.700619 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/controller/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.570465 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:40:07 crc kubenswrapper[4948]: E0120 20:40:07.571085 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.796038 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.809263 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/reloader/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.817053 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr-metrics/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.829041 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.845111 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy-frr/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.854302 4948 log.go:25] 
"Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-frr-files/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.861846 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-reloader/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.872340 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-metrics/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.885377 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-mxgmc_06d4b8b1-3c5f-4736-9492-bc33db43f510/frr-k8s-webhook-server/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.906289 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7998c69bcc-rkwld_a422b9d2-2fe8-485a-a7c7-fb0fa96706c9/manager/0.log" Jan 20 20:40:07 crc kubenswrapper[4948]: I0120 20:40:07.915124 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-989f8776d-mst22_3eb6ce14-f5fb-4e93-8f16-d4b0eec67237/webhook-server/0.log" Jan 20 20:40:08 crc kubenswrapper[4948]: I0120 20:40:08.232331 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/speaker/0.log" Jan 20 20:40:08 crc kubenswrapper[4948]: I0120 20:40:08.245116 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/kube-rbac-proxy/0.log" Jan 20 20:40:12 crc kubenswrapper[4948]: I0120 20:40:12.873543 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8_d79fcc60-85eb-450d-8d37-5b00b0af4ea0/extract/0.log" Jan 20 20:40:12 crc kubenswrapper[4948]: I0120 20:40:12.882734 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8_d79fcc60-85eb-450d-8d37-5b00b0af4ea0/util/0.log" Jan 20 20:40:12 crc kubenswrapper[4948]: I0120 20:40:12.891425 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8_d79fcc60-85eb-450d-8d37-5b00b0af4ea0/pull/0.log" Jan 20 20:40:12 crc kubenswrapper[4948]: I0120 20:40:12.905560 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7_d0fed87f-472d-480c-8006-2c2dc60df61e/extract/0.log" Jan 20 20:40:12 crc kubenswrapper[4948]: I0120 20:40:12.918376 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7_d0fed87f-472d-480c-8006-2c2dc60df61e/util/0.log" Jan 20 20:40:12 crc kubenswrapper[4948]: I0120 20:40:12.935463 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7_d0fed87f-472d-480c-8006-2c2dc60df61e/pull/0.log" Jan 20 20:40:13 crc kubenswrapper[4948]: I0120 20:40:13.461345 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cpztv_5882349f-db20-4e02-80dd-5a7f6b4e5f0f/registry-server/0.log" Jan 20 20:40:13 
crc kubenswrapper[4948]: I0120 20:40:13.467090 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cpztv_5882349f-db20-4e02-80dd-5a7f6b4e5f0f/extract-utilities/0.log" Jan 20 20:40:13 crc kubenswrapper[4948]: I0120 20:40:13.478296 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-cpztv_5882349f-db20-4e02-80dd-5a7f6b4e5f0f/extract-content/0.log" Jan 20 20:40:13 crc kubenswrapper[4948]: I0120 20:40:13.982758 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2jd7_52223d24-be7c-4761-8f46-efcc30f37f8b/registry-server/0.log" Jan 20 20:40:13 crc kubenswrapper[4948]: I0120 20:40:13.990030 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2jd7_52223d24-be7c-4761-8f46-efcc30f37f8b/extract-utilities/0.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.000616 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2jd7_52223d24-be7c-4761-8f46-efcc30f37f8b/extract-content/0.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.018016 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-z8fwl_7cf25c7d-e351-4a2e-8992-47542811fb1f/marketplace-operator/1.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.019251 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-z8fwl_7cf25c7d-e351-4a2e-8992-47542811fb1f/marketplace-operator/0.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.131592 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsxfw_f8d1e5d7-2511-47ad-b240-677792863a32/registry-server/0.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.140877 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsxfw_f8d1e5d7-2511-47ad-b240-677792863a32/extract-utilities/0.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.146063 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsxfw_f8d1e5d7-2511-47ad-b240-677792863a32/extract-content/0.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.526934 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kpqs5_29572b48-7ca5-4e09-83d8-dcf2cc40682b/registry-server/0.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.532788 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kpqs5_29572b48-7ca5-4e09-83d8-dcf2cc40682b/extract-utilities/0.log" Jan 20 20:40:14 crc kubenswrapper[4948]: I0120 20:40:14.540514 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kpqs5_29572b48-7ca5-4e09-83d8-dcf2cc40682b/extract-content/0.log" Jan 20 20:40:20 crc kubenswrapper[4948]: I0120 20:40:20.572506 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:40:20 crc kubenswrapper[4948]: E0120 20:40:20.573360 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:40:31 crc kubenswrapper[4948]: I0120 20:40:31.569837 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:40:31 crc kubenswrapper[4948]: E0120 20:40:31.570842 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:40:44 crc kubenswrapper[4948]: I0120 20:40:44.576146 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:40:44 crc kubenswrapper[4948]: E0120 20:40:44.580923 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:40:55 crc kubenswrapper[4948]: I0120 20:40:55.570741 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:40:55 crc kubenswrapper[4948]: E0120 20:40:55.571412 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:41:09 crc kubenswrapper[4948]: I0120 20:41:09.570168 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:41:09 crc kubenswrapper[4948]: E0120 20:41:09.570905 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:41:20 crc kubenswrapper[4948]: I0120 20:41:20.570144 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:41:20 crc kubenswrapper[4948]: E0120 20:41:20.570969 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" 
podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:41:34 crc kubenswrapper[4948]: I0120 20:41:34.570530 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:41:34 crc kubenswrapper[4948]: E0120 20:41:34.571346 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:41:41 crc kubenswrapper[4948]: I0120 20:41:41.687747 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/controller/0.log" Jan 20 20:41:41 crc kubenswrapper[4948]: I0120 20:41:41.693903 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/kube-rbac-proxy/0.log" Jan 20 20:41:41 crc kubenswrapper[4948]: I0120 20:41:41.718062 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/controller/0.log" Jan 20 20:41:42 crc kubenswrapper[4948]: I0120 20:41:42.084745 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-dt9ht_0a4be8e0-f8af-4f0d-8230-37fd71e2cc81/cert-manager-controller/0.log" Jan 20 20:41:42 crc kubenswrapper[4948]: I0120 20:41:42.098913 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-82hbd_1973fd2f-85c7-4fbb-92b0-0973744d9d00/cert-manager-cainjector/0.log" Jan 20 20:41:42 crc kubenswrapper[4948]: I0120 20:41:42.118506 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-fckz7_5474f4e5-fa0d-4931-b732-4a1d0e06c858/cert-manager-webhook/0.log" Jan 20 20:41:42 crc kubenswrapper[4948]: I0120 20:41:42.978477 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr/0.log" Jan 20 20:41:42 crc kubenswrapper[4948]: I0120 20:41:42.988963 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/reloader/0.log" Jan 20 20:41:42 crc kubenswrapper[4948]: I0120 20:41:42.994350 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr-metrics/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.046034 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.053020 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy-frr/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.062070 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-frr-files/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.074741 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-reloader/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.082075 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-metrics/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.090312 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-mxgmc_06d4b8b1-3c5f-4736-9492-bc33db43f510/frr-k8s-webhook-server/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.111780 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7998c69bcc-rkwld_a422b9d2-2fe8-485a-a7c7-fb0fa96706c9/manager/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.125833 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-989f8776d-mst22_3eb6ce14-f5fb-4e93-8f16-d4b0eec67237/webhook-server/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.447667 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/speaker/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.455786 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/kube-rbac-proxy/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.594514 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/extract/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.607529 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/util/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.621692 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/pull/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.701770 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-6vfzk_ef41048d-32d0-4b45-98ef-181e13e62c26/manager/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.745946 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-2k89b_d6a36d62-a638-45c5-956a-12cb6f1ced24/manager/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.759248 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-6mp4q_d507465c-a0e3-494e-9e20-ef8c3517e059/manager/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.834899 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-x9hmd_b78116d1-a584-49fa-ab14-86f78ce62420/manager/0.log" Jan 20 20:41:43 crc kubenswrapper[4948]: I0120 20:41:43.845770 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-m8f25_d8461566-61e6-495d-b1ad-c0178c2eb849/manager/0.log" Jan 20 20:41:43 crc 
kubenswrapper[4948]: I0120 20:41:43.871284 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-b7j48_6f758308-6a33-4dc5-996e-beae970d4083/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.116334 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-xgc4z_09ceeac6-c058-41a8-a0d6-07b4bde73893/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.128188 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-6xdw4_233a0ffe-a99e-4268-93ed-a2a20cb2c7ab/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.201932 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-hkwvp_ed91900c-0efb-4184-8d92-d11fb7ae82b7/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.216739 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-snszj_38d63cbf-6bc2-4c48-9905-88c65334d42a/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.256002 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-7qmgq_61ba0da3-99a5-4b43-a2fb-190260ab8f3a/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.302448 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-5mlm4_61da457f-7595-4df3-8705-e34138ec590d/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.377559 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-phpvf_094e4268-74c4-40e5-8f39-b6090b284c27/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.397925 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-k9n27_d4f3075e-95f9-432a-bfcd-621b6cbe2615/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.413308 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl_40c9112e-c5f0-4cf7-8039-f50ff4640ba9/manager/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.572054 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5fcf846598-7x9nh_6d523c92-ebbc-4860-9bcc-45ef88372f2b/operator/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.950483 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-dt9ht_0a4be8e0-f8af-4f0d-8230-37fd71e2cc81/cert-manager-controller/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.965076 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-82hbd_1973fd2f-85c7-4fbb-92b0-0973744d9d00/cert-manager-cainjector/0.log" Jan 20 20:41:44 crc kubenswrapper[4948]: I0120 20:41:44.980353 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-fckz7_5474f4e5-fa0d-4931-b732-4a1d0e06c858/cert-manager-webhook/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.680764 4948 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c9b95f56c-kd6qw_0a88f765-46a8-4252-832c-ccf595a0f1d2/manager/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.695270 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-fckw5_e98fafb2-a9ef-4252-a236-be3c009d42b2/registry-server/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.741603 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-zpq74_ebd95a40-2e8d-481a-a842-b8fe125ebdb2/manager/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.763012 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-wnzkb_febd743e-d499-4cc9-9e66-29ac1b4ca89c/manager/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.780355 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-9m5nk_f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0/operator/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.805816 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-56544cf655-ngkkb_80950323-03e4-4aa3-ba31-06043e2a51b9/manager/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.859326 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-rsb9m_910fc292-11a6-47de-80e6-59cc027e972c/manager/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.870692 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-2bt9t_5a25aeaf-8323-46a9-8c2a-e000321478ee/manager/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.882087 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-52fnn_76b9cf9a-a325-4528-8f35-3d0b94060ef1/manager/0.log" Jan 20 20:41:45 crc kubenswrapper[4948]: I0120 20:41:45.980833 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4pnmq_203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3/control-plane-machine-set-operator/0.log" Jan 20 20:41:46 crc kubenswrapper[4948]: I0120 20:41:46.000006 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-hxwlm_666e60ed-f213-4af4-a4a9-969864d1fd0e/kube-rbac-proxy/0.log" Jan 20 20:41:46 crc kubenswrapper[4948]: I0120 20:41:46.025426 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-hxwlm_666e60ed-f213-4af4-a4a9-969864d1fd0e/machine-api-operator/0.log" Jan 20 20:41:46 crc kubenswrapper[4948]: I0120 20:41:46.879769 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/extract/0.log" Jan 20 20:41:46 crc kubenswrapper[4948]: I0120 20:41:46.896178 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/util/0.log" Jan 20 20:41:46 crc kubenswrapper[4948]: I0120 20:41:46.912619 4948 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/pull/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.009016 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-6vfzk_ef41048d-32d0-4b45-98ef-181e13e62c26/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.066813 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-2k89b_d6a36d62-a638-45c5-956a-12cb6f1ced24/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.083549 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-6mp4q_d507465c-a0e3-494e-9e20-ef8c3517e059/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.148146 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-x9hmd_b78116d1-a584-49fa-ab14-86f78ce62420/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.161300 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-m8f25_d8461566-61e6-495d-b1ad-c0178c2eb849/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.184350 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-b7j48_6f758308-6a33-4dc5-996e-beae970d4083/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.434069 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-xgc4z_09ceeac6-c058-41a8-a0d6-07b4bde73893/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.446199 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-6xdw4_233a0ffe-a99e-4268-93ed-a2a20cb2c7ab/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.510172 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-hkwvp_ed91900c-0efb-4184-8d92-d11fb7ae82b7/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.523998 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-snszj_38d63cbf-6bc2-4c48-9905-88c65334d42a/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.555790 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-7qmgq_61ba0da3-99a5-4b43-a2fb-190260ab8f3a/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.598368 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-5mlm4_61da457f-7595-4df3-8705-e34138ec590d/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.683667 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-phpvf_094e4268-74c4-40e5-8f39-b6090b284c27/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.695508 4948 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-k9n27_d4f3075e-95f9-432a-bfcd-621b6cbe2615/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.716138 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl_40c9112e-c5f0-4cf7-8039-f50ff4640ba9/manager/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.808933 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-czsd9_a0bd44ac-39a0-4aed-8a23-d12330d46924/nmstate-console-plugin/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.828282 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5fcf846598-7x9nh_6d523c92-ebbc-4860-9bcc-45ef88372f2b/operator/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.841126 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-nqpgc_34b9a637-f29d-49ad-961c-d923e71907e1/nmstate-handler/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.859396 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jq57s_d7a43a4d-6505-4105-bfb8-c1239d0436e8/nmstate-metrics/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.870204 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jq57s_d7a43a4d-6505-4105-bfb8-c1239d0436e8/kube-rbac-proxy/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.892339 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-9ldq2_d72955e0-ce7e-4d8f-be8a-b22eee46ec69/nmstate-operator/0.log" Jan 20 20:41:47 crc kubenswrapper[4948]: I0120 20:41:47.901726 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-6lt8c_b4431242-1662-43bd-bbfc-192d87f5393b/nmstate-webhook/0.log" Jan 20 20:41:48 crc kubenswrapper[4948]: I0120 20:41:48.871847 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c9b95f56c-kd6qw_0a88f765-46a8-4252-832c-ccf595a0f1d2/manager/0.log" Jan 20 20:41:48 crc kubenswrapper[4948]: I0120 20:41:48.894901 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-fckw5_e98fafb2-a9ef-4252-a236-be3c009d42b2/registry-server/0.log" Jan 20 20:41:48 crc kubenswrapper[4948]: I0120 20:41:48.943294 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-zpq74_ebd95a40-2e8d-481a-a842-b8fe125ebdb2/manager/0.log" Jan 20 20:41:48 crc kubenswrapper[4948]: I0120 20:41:48.966658 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-wnzkb_febd743e-d499-4cc9-9e66-29ac1b4ca89c/manager/0.log" Jan 20 20:41:48 crc kubenswrapper[4948]: I0120 20:41:48.985872 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-9m5nk_f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0/operator/0.log" Jan 20 20:41:49 crc kubenswrapper[4948]: I0120 20:41:49.008850 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_swift-operator-controller-manager-56544cf655-ngkkb_80950323-03e4-4aa3-ba31-06043e2a51b9/manager/0.log" Jan 20 20:41:49 crc kubenswrapper[4948]: I0120 20:41:49.062928 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-rsb9m_910fc292-11a6-47de-80e6-59cc027e972c/manager/0.log" Jan 20 20:41:49 crc kubenswrapper[4948]: I0120 20:41:49.074733 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-2bt9t_5a25aeaf-8323-46a9-8c2a-e000321478ee/manager/0.log" Jan 20 20:41:49 crc kubenswrapper[4948]: I0120 20:41:49.085849 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-52fnn_76b9cf9a-a325-4528-8f35-3d0b94060ef1/manager/0.log" Jan 20 20:41:49 crc kubenswrapper[4948]: I0120 20:41:49.570889 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:41:49 crc kubenswrapper[4948]: E0120 20:41:49.571158 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.352017 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/kube-multus-additional-cni-plugins/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.364121 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/egress-router-binary-copy/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.371474 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/cni-plugins/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.379366 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/bond-cni-plugin/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.387957 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/routeoverride-cni/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.396987 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/whereabouts-cni-bincopy/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.404350 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/whereabouts-cni/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.437034 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-k4fgt_34a4c701-23f8-4d4e-97c0-7ceeaa229d0f/multus-admission-controller/0.log" Jan 20 
20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.445218 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-k4fgt_34a4c701-23f8-4d4e-97c0-7ceeaa229d0f/kube-rbac-proxy/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.484976 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/1.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.574324 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/2.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.607943 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-h4c6s_dbfcfce6-0ab8-40ba-80b2-d391a7dd5418/network-metrics-daemon/0.log" Jan 20 20:41:51 crc kubenswrapper[4948]: I0120 20:41:51.617280 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-h4c6s_dbfcfce6-0ab8-40ba-80b2-d391a7dd5418/kube-rbac-proxy/0.log" Jan 20 20:42:00 crc kubenswrapper[4948]: I0120 20:42:00.570324 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:42:00 crc kubenswrapper[4948]: E0120 20:42:00.571137 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:42:15 crc kubenswrapper[4948]: I0120 20:42:15.570173 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:42:15 crc kubenswrapper[4948]: E0120 20:42:15.571178 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:42:26 crc kubenswrapper[4948]: I0120 20:42:26.569861 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:42:26 crc kubenswrapper[4948]: E0120 20:42:26.570513 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.156341 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hgshd"] Jan 20 20:42:27 crc kubenswrapper[4948]: E0120 20:42:27.157057 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c17ccf45-4ddb-4d08-8895-639861993599" containerName="container-00" Jan 20 20:42:27 crc 
kubenswrapper[4948]: I0120 20:42:27.157075 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c17ccf45-4ddb-4d08-8895-639861993599" containerName="container-00" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.157284 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c17ccf45-4ddb-4d08-8895-639861993599" containerName="container-00" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.158748 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.184262 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hgshd"] Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.353230 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-catalog-content\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.353370 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt2qw\" (UniqueName: \"kubernetes.io/projected/dff70d04-3536-4569-9eef-44a63bac4da2-kube-api-access-rt2qw\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.353482 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-utilities\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.455222 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rt2qw\" (UniqueName: \"kubernetes.io/projected/dff70d04-3536-4569-9eef-44a63bac4da2-kube-api-access-rt2qw\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.455320 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-utilities\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.455434 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-catalog-content\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.455971 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-catalog-content\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc 
kubenswrapper[4948]: I0120 20:42:27.456838 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-utilities\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.483838 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rt2qw\" (UniqueName: \"kubernetes.io/projected/dff70d04-3536-4569-9eef-44a63bac4da2-kube-api-access-rt2qw\") pod \"redhat-operators-hgshd\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.523407 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:27 crc kubenswrapper[4948]: I0120 20:42:27.868207 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hgshd"] Jan 20 20:42:28 crc kubenswrapper[4948]: I0120 20:42:28.207459 4948 generic.go:334] "Generic (PLEG): container finished" podID="dff70d04-3536-4569-9eef-44a63bac4da2" containerID="61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc" exitCode=0 Jan 20 20:42:28 crc kubenswrapper[4948]: I0120 20:42:28.207518 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgshd" event={"ID":"dff70d04-3536-4569-9eef-44a63bac4da2","Type":"ContainerDied","Data":"61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc"} Jan 20 20:42:28 crc kubenswrapper[4948]: I0120 20:42:28.207559 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgshd" event={"ID":"dff70d04-3536-4569-9eef-44a63bac4da2","Type":"ContainerStarted","Data":"9dee3ea726c982d0340ddfbceb6049d2b21af90f958e09d046fdad4ecd2e5980"} Jan 20 20:42:28 crc kubenswrapper[4948]: I0120 20:42:28.214648 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:42:31 crc kubenswrapper[4948]: I0120 20:42:31.258730 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgshd" event={"ID":"dff70d04-3536-4569-9eef-44a63bac4da2","Type":"ContainerStarted","Data":"680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf"} Jan 20 20:42:34 crc kubenswrapper[4948]: I0120 20:42:34.298544 4948 generic.go:334] "Generic (PLEG): container finished" podID="dff70d04-3536-4569-9eef-44a63bac4da2" containerID="680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf" exitCode=0 Jan 20 20:42:34 crc kubenswrapper[4948]: I0120 20:42:34.298621 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgshd" event={"ID":"dff70d04-3536-4569-9eef-44a63bac4da2","Type":"ContainerDied","Data":"680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf"} Jan 20 20:42:36 crc kubenswrapper[4948]: I0120 20:42:36.320553 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgshd" event={"ID":"dff70d04-3536-4569-9eef-44a63bac4da2","Type":"ContainerStarted","Data":"8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6"} Jan 20 20:42:36 crc kubenswrapper[4948]: I0120 20:42:36.346198 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/redhat-operators-hgshd" podStartSLOduration=2.357537956 podStartE2EDuration="9.346173809s" podCreationTimestamp="2026-01-20 20:42:27 +0000 UTC" firstStartedPulling="2026-01-20 20:42:28.214249965 +0000 UTC m=+3176.164974934" lastFinishedPulling="2026-01-20 20:42:35.202885818 +0000 UTC m=+3183.153610787" observedRunningTime="2026-01-20 20:42:36.341731443 +0000 UTC m=+3184.292456422" watchObservedRunningTime="2026-01-20 20:42:36.346173809 +0000 UTC m=+3184.296898788" Jan 20 20:42:37 crc kubenswrapper[4948]: I0120 20:42:37.524277 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:37 crc kubenswrapper[4948]: I0120 20:42:37.524605 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:37 crc kubenswrapper[4948]: I0120 20:42:37.570473 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:42:37 crc kubenswrapper[4948]: E0120 20:42:37.570806 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:42:38 crc kubenswrapper[4948]: I0120 20:42:38.583519 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgshd" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="registry-server" probeResult="failure" output=< Jan 20 20:42:38 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 20:42:38 crc kubenswrapper[4948]: > Jan 20 20:42:47 crc kubenswrapper[4948]: I0120 20:42:47.593010 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:47 crc kubenswrapper[4948]: I0120 20:42:47.655378 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:47 crc kubenswrapper[4948]: I0120 20:42:47.840909 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hgshd"] Jan 20 20:42:49 crc kubenswrapper[4948]: I0120 20:42:49.462677 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hgshd" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="registry-server" containerID="cri-o://8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6" gracePeriod=2 Jan 20 20:42:49 crc kubenswrapper[4948]: I0120 20:42:49.931534 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.092259 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-catalog-content\") pod \"dff70d04-3536-4569-9eef-44a63bac4da2\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.092419 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-utilities\") pod \"dff70d04-3536-4569-9eef-44a63bac4da2\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.092579 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rt2qw\" (UniqueName: \"kubernetes.io/projected/dff70d04-3536-4569-9eef-44a63bac4da2-kube-api-access-rt2qw\") pod \"dff70d04-3536-4569-9eef-44a63bac4da2\" (UID: \"dff70d04-3536-4569-9eef-44a63bac4da2\") " Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.094247 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-utilities" (OuterVolumeSpecName: "utilities") pod "dff70d04-3536-4569-9eef-44a63bac4da2" (UID: "dff70d04-3536-4569-9eef-44a63bac4da2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.100523 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dff70d04-3536-4569-9eef-44a63bac4da2-kube-api-access-rt2qw" (OuterVolumeSpecName: "kube-api-access-rt2qw") pod "dff70d04-3536-4569-9eef-44a63bac4da2" (UID: "dff70d04-3536-4569-9eef-44a63bac4da2"). InnerVolumeSpecName "kube-api-access-rt2qw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.194620 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.194689 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rt2qw\" (UniqueName: \"kubernetes.io/projected/dff70d04-3536-4569-9eef-44a63bac4da2-kube-api-access-rt2qw\") on node \"crc\" DevicePath \"\"" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.216132 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dff70d04-3536-4569-9eef-44a63bac4da2" (UID: "dff70d04-3536-4569-9eef-44a63bac4da2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.296505 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dff70d04-3536-4569-9eef-44a63bac4da2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.473603 4948 generic.go:334] "Generic (PLEG): container finished" podID="dff70d04-3536-4569-9eef-44a63bac4da2" containerID="8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6" exitCode=0 Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.473685 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgshd" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.473728 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgshd" event={"ID":"dff70d04-3536-4569-9eef-44a63bac4da2","Type":"ContainerDied","Data":"8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6"} Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.475392 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgshd" event={"ID":"dff70d04-3536-4569-9eef-44a63bac4da2","Type":"ContainerDied","Data":"9dee3ea726c982d0340ddfbceb6049d2b21af90f958e09d046fdad4ecd2e5980"} Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.475414 4948 scope.go:117] "RemoveContainer" containerID="8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.500037 4948 scope.go:117] "RemoveContainer" containerID="680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf" Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.521378 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hgshd"] Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.532330 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hgshd"] Jan 20 20:42:50 crc kubenswrapper[4948]: I0120 20:42:50.581638 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" path="/var/lib/kubelet/pods/dff70d04-3536-4569-9eef-44a63bac4da2/volumes" Jan 20 20:42:51 crc kubenswrapper[4948]: I0120 20:42:51.309269 4948 scope.go:117] "RemoveContainer" containerID="61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc" Jan 20 20:42:51 crc kubenswrapper[4948]: I0120 20:42:51.380273 4948 scope.go:117] "RemoveContainer" containerID="8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6" Jan 20 20:42:51 crc kubenswrapper[4948]: E0120 20:42:51.380737 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6\": container with ID starting with 8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6 not found: ID does not exist" containerID="8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6" Jan 20 20:42:51 crc kubenswrapper[4948]: I0120 20:42:51.380785 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6"} err="failed to get container status \"8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6\": rpc error: code = NotFound desc 
= could not find container \"8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6\": container with ID starting with 8d39a578659b8deb1d6d52eb853eeac55e92ae444180c4c505623dd2e0a990b6 not found: ID does not exist" Jan 20 20:42:51 crc kubenswrapper[4948]: I0120 20:42:51.380813 4948 scope.go:117] "RemoveContainer" containerID="680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf" Jan 20 20:42:51 crc kubenswrapper[4948]: E0120 20:42:51.381262 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf\": container with ID starting with 680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf not found: ID does not exist" containerID="680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf" Jan 20 20:42:51 crc kubenswrapper[4948]: I0120 20:42:51.381331 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf"} err="failed to get container status \"680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf\": rpc error: code = NotFound desc = could not find container \"680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf\": container with ID starting with 680204e13986e103d9b6a52bf692c2e439433d9dc9d8200d8cde709749f880cf not found: ID does not exist" Jan 20 20:42:51 crc kubenswrapper[4948]: I0120 20:42:51.381355 4948 scope.go:117] "RemoveContainer" containerID="61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc" Jan 20 20:42:51 crc kubenswrapper[4948]: E0120 20:42:51.382167 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc\": container with ID starting with 61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc not found: ID does not exist" containerID="61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc" Jan 20 20:42:51 crc kubenswrapper[4948]: I0120 20:42:51.382189 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc"} err="failed to get container status \"61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc\": rpc error: code = NotFound desc = could not find container \"61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc\": container with ID starting with 61de18015a01845489e26801b4e5e00008d0b9af7f99d60526fdb47ff5042acc not found: ID does not exist" Jan 20 20:42:51 crc kubenswrapper[4948]: I0120 20:42:51.571533 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:42:51 crc kubenswrapper[4948]: E0120 20:42:51.572139 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:43:05 crc kubenswrapper[4948]: I0120 20:43:05.570738 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 
20 20:43:05 crc kubenswrapper[4948]: E0120 20:43:05.571626 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:43:18 crc kubenswrapper[4948]: I0120 20:43:18.570325 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:43:18 crc kubenswrapper[4948]: E0120 20:43:18.571185 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:43:31 crc kubenswrapper[4948]: I0120 20:43:31.569921 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:43:31 crc kubenswrapper[4948]: I0120 20:43:31.917340 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"a903b81d54eb3dba7835451af8d6e673d879722e4e0ac1bd55e1191b899c1340"} Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.169227 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b"] Jan 20 20:45:00 crc kubenswrapper[4948]: E0120 20:45:00.170293 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="extract-content" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.170309 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="extract-content" Jan 20 20:45:00 crc kubenswrapper[4948]: E0120 20:45:00.170328 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="extract-utilities" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.170335 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="extract-utilities" Jan 20 20:45:00 crc kubenswrapper[4948]: E0120 20:45:00.170347 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="registry-server" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.170353 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="registry-server" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.170526 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="dff70d04-3536-4569-9eef-44a63bac4da2" containerName="registry-server" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.171339 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.180067 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.180317 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.189437 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b"] Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.295523 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a56bba6b-259f-4c4b-8a31-f63ceac9684b-config-volume\") pod \"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.295765 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjxdf\" (UniqueName: \"kubernetes.io/projected/a56bba6b-259f-4c4b-8a31-f63ceac9684b-kube-api-access-fjxdf\") pod \"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.295943 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a56bba6b-259f-4c4b-8a31-f63ceac9684b-secret-volume\") pod \"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.397829 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a56bba6b-259f-4c4b-8a31-f63ceac9684b-secret-volume\") pod \"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.397885 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a56bba6b-259f-4c4b-8a31-f63ceac9684b-config-volume\") pod \"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.397984 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjxdf\" (UniqueName: \"kubernetes.io/projected/a56bba6b-259f-4c4b-8a31-f63ceac9684b-kube-api-access-fjxdf\") pod \"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.399109 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a56bba6b-259f-4c4b-8a31-f63ceac9684b-config-volume\") pod 
\"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.415121 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a56bba6b-259f-4c4b-8a31-f63ceac9684b-secret-volume\") pod \"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.417698 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjxdf\" (UniqueName: \"kubernetes.io/projected/a56bba6b-259f-4c4b-8a31-f63ceac9684b-kube-api-access-fjxdf\") pod \"collect-profiles-29482365-7rd8b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:00 crc kubenswrapper[4948]: I0120 20:45:00.506195 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:01 crc kubenswrapper[4948]: I0120 20:45:01.036315 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b"] Jan 20 20:45:01 crc kubenswrapper[4948]: I0120 20:45:01.934324 4948 generic.go:334] "Generic (PLEG): container finished" podID="a56bba6b-259f-4c4b-8a31-f63ceac9684b" containerID="f2da9936e36b9f69e241b730fe3cf202d40b1378c3ef89632946a3c15137805d" exitCode=0 Jan 20 20:45:01 crc kubenswrapper[4948]: I0120 20:45:01.934404 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" event={"ID":"a56bba6b-259f-4c4b-8a31-f63ceac9684b","Type":"ContainerDied","Data":"f2da9936e36b9f69e241b730fe3cf202d40b1378c3ef89632946a3c15137805d"} Jan 20 20:45:01 crc kubenswrapper[4948]: I0120 20:45:01.934642 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" event={"ID":"a56bba6b-259f-4c4b-8a31-f63ceac9684b","Type":"ContainerStarted","Data":"7988f76c05ab1cb7e8d7ce1ec44e7a863f14f5a48eb4ddc5af587ddc2f844422"} Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.344756 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.468285 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a56bba6b-259f-4c4b-8a31-f63ceac9684b-secret-volume\") pod \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.468347 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a56bba6b-259f-4c4b-8a31-f63ceac9684b-config-volume\") pod \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.468409 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjxdf\" (UniqueName: \"kubernetes.io/projected/a56bba6b-259f-4c4b-8a31-f63ceac9684b-kube-api-access-fjxdf\") pod \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\" (UID: \"a56bba6b-259f-4c4b-8a31-f63ceac9684b\") " Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.469061 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a56bba6b-259f-4c4b-8a31-f63ceac9684b-config-volume" (OuterVolumeSpecName: "config-volume") pod "a56bba6b-259f-4c4b-8a31-f63ceac9684b" (UID: "a56bba6b-259f-4c4b-8a31-f63ceac9684b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.469808 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a56bba6b-259f-4c4b-8a31-f63ceac9684b-config-volume\") on node \"crc\" DevicePath \"\"" Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.477169 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a56bba6b-259f-4c4b-8a31-f63ceac9684b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a56bba6b-259f-4c4b-8a31-f63ceac9684b" (UID: "a56bba6b-259f-4c4b-8a31-f63ceac9684b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.478097 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a56bba6b-259f-4c4b-8a31-f63ceac9684b-kube-api-access-fjxdf" (OuterVolumeSpecName: "kube-api-access-fjxdf") pod "a56bba6b-259f-4c4b-8a31-f63ceac9684b" (UID: "a56bba6b-259f-4c4b-8a31-f63ceac9684b"). InnerVolumeSpecName "kube-api-access-fjxdf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.571563 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a56bba6b-259f-4c4b-8a31-f63ceac9684b-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.571942 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjxdf\" (UniqueName: \"kubernetes.io/projected/a56bba6b-259f-4c4b-8a31-f63ceac9684b-kube-api-access-fjxdf\") on node \"crc\" DevicePath \"\"" Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.966134 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" event={"ID":"a56bba6b-259f-4c4b-8a31-f63ceac9684b","Type":"ContainerDied","Data":"7988f76c05ab1cb7e8d7ce1ec44e7a863f14f5a48eb4ddc5af587ddc2f844422"} Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.966183 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7988f76c05ab1cb7e8d7ce1ec44e7a863f14f5a48eb4ddc5af587ddc2f844422" Jan 20 20:45:03 crc kubenswrapper[4948]: I0120 20:45:03.966228 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482365-7rd8b" Jan 20 20:45:04 crc kubenswrapper[4948]: I0120 20:45:04.465193 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w"] Jan 20 20:45:04 crc kubenswrapper[4948]: I0120 20:45:04.520802 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482320-96r5w"] Jan 20 20:45:04 crc kubenswrapper[4948]: I0120 20:45:04.582310 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0573d7c9-3516-40cd-a9f5-3f8e99ad8c39" path="/var/lib/kubelet/pods/0573d7c9-3516-40cd-a9f5-3f8e99ad8c39/volumes" Jan 20 20:45:46 crc kubenswrapper[4948]: I0120 20:45:46.058609 4948 scope.go:117] "RemoveContainer" containerID="2900eadc7a9ab5d06018d0b68d33bfa089181e42e6002569f96e04453237ae78" Jan 20 20:45:50 crc kubenswrapper[4948]: I0120 20:45:50.250072 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:45:50 crc kubenswrapper[4948]: I0120 20:45:50.250834 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.758961 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-db5tw"] Jan 20 20:45:56 crc kubenswrapper[4948]: E0120 20:45:56.760046 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a56bba6b-259f-4c4b-8a31-f63ceac9684b" containerName="collect-profiles" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.760069 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="a56bba6b-259f-4c4b-8a31-f63ceac9684b" containerName="collect-profiles" Jan 20 20:45:56 crc kubenswrapper[4948]: 
I0120 20:45:56.760415 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="a56bba6b-259f-4c4b-8a31-f63ceac9684b" containerName="collect-profiles" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.762528 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.788560 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-db5tw"] Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.887631 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sclf\" (UniqueName: \"kubernetes.io/projected/0120cd08-de07-487b-af62-88990bca428d-kube-api-access-5sclf\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.888026 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0120cd08-de07-487b-af62-88990bca428d-catalog-content\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.888107 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0120cd08-de07-487b-af62-88990bca428d-utilities\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.990132 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0120cd08-de07-487b-af62-88990bca428d-utilities\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.990685 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0120cd08-de07-487b-af62-88990bca428d-utilities\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.990864 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sclf\" (UniqueName: \"kubernetes.io/projected/0120cd08-de07-487b-af62-88990bca428d-kube-api-access-5sclf\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.991136 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0120cd08-de07-487b-af62-88990bca428d-catalog-content\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:56 crc kubenswrapper[4948]: I0120 20:45:56.991477 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/0120cd08-de07-487b-af62-88990bca428d-catalog-content\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:57 crc kubenswrapper[4948]: I0120 20:45:57.026669 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sclf\" (UniqueName: \"kubernetes.io/projected/0120cd08-de07-487b-af62-88990bca428d-kube-api-access-5sclf\") pod \"certified-operators-db5tw\" (UID: \"0120cd08-de07-487b-af62-88990bca428d\") " pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:57 crc kubenswrapper[4948]: I0120 20:45:57.090854 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:45:57 crc kubenswrapper[4948]: I0120 20:45:57.643603 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-db5tw"] Jan 20 20:45:58 crc kubenswrapper[4948]: I0120 20:45:58.535833 4948 generic.go:334] "Generic (PLEG): container finished" podID="0120cd08-de07-487b-af62-88990bca428d" containerID="875bbb253f258583ef8ccaa0378121a849c13cb0c3d80f8fa288067f6f65cc52" exitCode=0 Jan 20 20:45:58 crc kubenswrapper[4948]: I0120 20:45:58.536195 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-db5tw" event={"ID":"0120cd08-de07-487b-af62-88990bca428d","Type":"ContainerDied","Data":"875bbb253f258583ef8ccaa0378121a849c13cb0c3d80f8fa288067f6f65cc52"} Jan 20 20:45:58 crc kubenswrapper[4948]: I0120 20:45:58.536257 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-db5tw" event={"ID":"0120cd08-de07-487b-af62-88990bca428d","Type":"ContainerStarted","Data":"80e671869a7a7c1a4a575f5de379a18bfb239516f9293e1c5856f53c3fcab548"} Jan 20 20:46:04 crc kubenswrapper[4948]: I0120 20:46:04.592660 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-db5tw" event={"ID":"0120cd08-de07-487b-af62-88990bca428d","Type":"ContainerStarted","Data":"81bbd850d146e11e3935bfeb99a03c815ac8dc9d976babd83bdfd228260d0448"} Jan 20 20:46:05 crc kubenswrapper[4948]: I0120 20:46:05.602350 4948 generic.go:334] "Generic (PLEG): container finished" podID="0120cd08-de07-487b-af62-88990bca428d" containerID="81bbd850d146e11e3935bfeb99a03c815ac8dc9d976babd83bdfd228260d0448" exitCode=0 Jan 20 20:46:05 crc kubenswrapper[4948]: I0120 20:46:05.602403 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-db5tw" event={"ID":"0120cd08-de07-487b-af62-88990bca428d","Type":"ContainerDied","Data":"81bbd850d146e11e3935bfeb99a03c815ac8dc9d976babd83bdfd228260d0448"} Jan 20 20:46:06 crc kubenswrapper[4948]: I0120 20:46:06.616821 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-db5tw" event={"ID":"0120cd08-de07-487b-af62-88990bca428d","Type":"ContainerStarted","Data":"e81a0230f5a8477dce68901fcdf0d66d7e77d652038c25f0bc50a5ec01bc3b38"} Jan 20 20:46:07 crc kubenswrapper[4948]: I0120 20:46:07.091322 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:46:07 crc kubenswrapper[4948]: I0120 20:46:07.091686 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:46:08 crc kubenswrapper[4948]: I0120 20:46:08.143756 
4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-db5tw" podUID="0120cd08-de07-487b-af62-88990bca428d" containerName="registry-server" probeResult="failure" output=< Jan 20 20:46:08 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 20:46:08 crc kubenswrapper[4948]: > Jan 20 20:46:17 crc kubenswrapper[4948]: I0120 20:46:17.147068 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:46:17 crc kubenswrapper[4948]: I0120 20:46:17.166964 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-db5tw" podStartSLOduration=13.581130923 podStartE2EDuration="21.16693493s" podCreationTimestamp="2026-01-20 20:45:56 +0000 UTC" firstStartedPulling="2026-01-20 20:45:58.538500542 +0000 UTC m=+3386.489225551" lastFinishedPulling="2026-01-20 20:46:06.124304589 +0000 UTC m=+3394.075029558" observedRunningTime="2026-01-20 20:46:06.647010259 +0000 UTC m=+3394.597735228" watchObservedRunningTime="2026-01-20 20:46:17.16693493 +0000 UTC m=+3405.117659899" Jan 20 20:46:17 crc kubenswrapper[4948]: I0120 20:46:17.206630 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-db5tw" Jan 20 20:46:17 crc kubenswrapper[4948]: I0120 20:46:17.332404 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-db5tw"] Jan 20 20:46:17 crc kubenswrapper[4948]: I0120 20:46:17.398650 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cpztv"] Jan 20 20:46:17 crc kubenswrapper[4948]: I0120 20:46:17.398938 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cpztv" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerName="registry-server" containerID="cri-o://d5c55826673facc08a010914dca1e1855c9447cbc10b2b32f64e610171d93fca" gracePeriod=2 Jan 20 20:46:17 crc kubenswrapper[4948]: I0120 20:46:17.777076 4948 generic.go:334] "Generic (PLEG): container finished" podID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerID="d5c55826673facc08a010914dca1e1855c9447cbc10b2b32f64e610171d93fca" exitCode=0 Jan 20 20:46:17 crc kubenswrapper[4948]: I0120 20:46:17.777121 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpztv" event={"ID":"5882349f-db20-4e02-80dd-5a7f6b4e5f0f","Type":"ContainerDied","Data":"d5c55826673facc08a010914dca1e1855c9447cbc10b2b32f64e610171d93fca"} Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.177266 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cpztv" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.204976 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kglj\" (UniqueName: \"kubernetes.io/projected/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-kube-api-access-4kglj\") pod \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.205050 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-catalog-content\") pod \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.205179 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-utilities\") pod \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\" (UID: \"5882349f-db20-4e02-80dd-5a7f6b4e5f0f\") " Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.205660 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-utilities" (OuterVolumeSpecName: "utilities") pod "5882349f-db20-4e02-80dd-5a7f6b4e5f0f" (UID: "5882349f-db20-4e02-80dd-5a7f6b4e5f0f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.214238 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-kube-api-access-4kglj" (OuterVolumeSpecName: "kube-api-access-4kglj") pod "5882349f-db20-4e02-80dd-5a7f6b4e5f0f" (UID: "5882349f-db20-4e02-80dd-5a7f6b4e5f0f"). InnerVolumeSpecName "kube-api-access-4kglj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.288324 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5882349f-db20-4e02-80dd-5a7f6b4e5f0f" (UID: "5882349f-db20-4e02-80dd-5a7f6b4e5f0f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.307202 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.307238 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kglj\" (UniqueName: \"kubernetes.io/projected/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-kube-api-access-4kglj\") on node \"crc\" DevicePath \"\"" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.307248 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5882349f-db20-4e02-80dd-5a7f6b4e5f0f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.788586 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpztv" event={"ID":"5882349f-db20-4e02-80dd-5a7f6b4e5f0f","Type":"ContainerDied","Data":"8102e813a574425559b34d88d5ca6854c2a309cd0936de1ec683b79d6b9ec942"} Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.788989 4948 scope.go:117] "RemoveContainer" containerID="d5c55826673facc08a010914dca1e1855c9447cbc10b2b32f64e610171d93fca" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.788687 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpztv" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.834068 4948 scope.go:117] "RemoveContainer" containerID="a0f2a35e63c95bb1c50f43243b1414fc76be85055ad06e4de510d28d847bbc71" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.883235 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cpztv"] Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.911340 4948 scope.go:117] "RemoveContainer" containerID="c786d7d5b53b61f7cddfe4913701f9aae7e84db4b5f21b40e779852c6453451d" Jan 20 20:46:18 crc kubenswrapper[4948]: I0120 20:46:18.920857 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cpztv"] Jan 20 20:46:20 crc kubenswrapper[4948]: I0120 20:46:20.249753 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:46:20 crc kubenswrapper[4948]: I0120 20:46:20.249819 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:46:20 crc kubenswrapper[4948]: I0120 20:46:20.580536 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" path="/var/lib/kubelet/pods/5882349f-db20-4e02-80dd-5a7f6b4e5f0f/volumes" Jan 20 20:46:50 crc kubenswrapper[4948]: I0120 20:46:50.249857 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:46:50 crc kubenswrapper[4948]: I0120 20:46:50.250291 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:46:50 crc kubenswrapper[4948]: I0120 20:46:50.250334 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:46:50 crc kubenswrapper[4948]: I0120 20:46:50.251091 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a903b81d54eb3dba7835451af8d6e673d879722e4e0ac1bd55e1191b899c1340"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:46:50 crc kubenswrapper[4948]: I0120 20:46:50.251137 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://a903b81d54eb3dba7835451af8d6e673d879722e4e0ac1bd55e1191b899c1340" gracePeriod=600 Jan 20 20:46:51 crc kubenswrapper[4948]: I0120 20:46:51.094044 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="a903b81d54eb3dba7835451af8d6e673d879722e4e0ac1bd55e1191b899c1340" exitCode=0 Jan 20 20:46:51 crc kubenswrapper[4948]: I0120 20:46:51.094262 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"a903b81d54eb3dba7835451af8d6e673d879722e4e0ac1bd55e1191b899c1340"} Jan 20 20:46:51 crc kubenswrapper[4948]: I0120 20:46:51.094553 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d"} Jan 20 20:46:51 crc kubenswrapper[4948]: I0120 20:46:51.094586 4948 scope.go:117] "RemoveContainer" containerID="d2584ef1e72d88e22313735ed4a86aab90035d22bd1aa4f388f83f3b997a402f" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.672284 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tdjnk"] Jan 20 20:47:11 crc kubenswrapper[4948]: E0120 20:47:11.673405 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerName="registry-server" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.673421 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerName="registry-server" Jan 20 20:47:11 crc kubenswrapper[4948]: E0120 20:47:11.673442 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerName="extract-content" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.673449 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" 
containerName="extract-content" Jan 20 20:47:11 crc kubenswrapper[4948]: E0120 20:47:11.673482 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerName="extract-utilities" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.673490 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerName="extract-utilities" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.673760 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="5882349f-db20-4e02-80dd-5a7f6b4e5f0f" containerName="registry-server" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.675548 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.702493 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tdjnk"] Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.791481 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6hmf\" (UniqueName: \"kubernetes.io/projected/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-kube-api-access-c6hmf\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.791717 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-utilities\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.791966 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-catalog-content\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.893959 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-catalog-content\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.894084 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6hmf\" (UniqueName: \"kubernetes.io/projected/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-kube-api-access-c6hmf\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.894187 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-utilities\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.894725 4948 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-utilities\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.894719 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-catalog-content\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:11 crc kubenswrapper[4948]: I0120 20:47:11.923753 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6hmf\" (UniqueName: \"kubernetes.io/projected/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-kube-api-access-c6hmf\") pod \"redhat-marketplace-tdjnk\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:12 crc kubenswrapper[4948]: I0120 20:47:12.004055 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:12 crc kubenswrapper[4948]: I0120 20:47:12.529834 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tdjnk"] Jan 20 20:47:13 crc kubenswrapper[4948]: I0120 20:47:13.353770 4948 generic.go:334] "Generic (PLEG): container finished" podID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerID="68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a" exitCode=0 Jan 20 20:47:13 crc kubenswrapper[4948]: I0120 20:47:13.353854 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tdjnk" event={"ID":"44b51a17-28e2-4c5d-8f86-1aa00c8156a5","Type":"ContainerDied","Data":"68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a"} Jan 20 20:47:13 crc kubenswrapper[4948]: I0120 20:47:13.355283 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tdjnk" event={"ID":"44b51a17-28e2-4c5d-8f86-1aa00c8156a5","Type":"ContainerStarted","Data":"b44084ed7ecc2e91179949959f52801f4f3c383bb103c1f9cc238da17e600732"} Jan 20 20:47:14 crc kubenswrapper[4948]: I0120 20:47:14.390665 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tdjnk" event={"ID":"44b51a17-28e2-4c5d-8f86-1aa00c8156a5","Type":"ContainerStarted","Data":"bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471"} Jan 20 20:47:15 crc kubenswrapper[4948]: I0120 20:47:15.404878 4948 generic.go:334] "Generic (PLEG): container finished" podID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerID="bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471" exitCode=0 Jan 20 20:47:15 crc kubenswrapper[4948]: I0120 20:47:15.405064 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tdjnk" event={"ID":"44b51a17-28e2-4c5d-8f86-1aa00c8156a5","Type":"ContainerDied","Data":"bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471"} Jan 20 20:47:16 crc kubenswrapper[4948]: I0120 20:47:16.414700 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tdjnk" event={"ID":"44b51a17-28e2-4c5d-8f86-1aa00c8156a5","Type":"ContainerStarted","Data":"e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013"} Jan 20 20:47:16 crc 
kubenswrapper[4948]: I0120 20:47:16.440697 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tdjnk" podStartSLOduration=2.957146194 podStartE2EDuration="5.440647564s" podCreationTimestamp="2026-01-20 20:47:11 +0000 UTC" firstStartedPulling="2026-01-20 20:47:13.356968286 +0000 UTC m=+3461.307693255" lastFinishedPulling="2026-01-20 20:47:15.840469666 +0000 UTC m=+3463.791194625" observedRunningTime="2026-01-20 20:47:16.43314991 +0000 UTC m=+3464.383874919" watchObservedRunningTime="2026-01-20 20:47:16.440647564 +0000 UTC m=+3464.391372533" Jan 20 20:47:22 crc kubenswrapper[4948]: I0120 20:47:22.004474 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:22 crc kubenswrapper[4948]: I0120 20:47:22.004979 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:22 crc kubenswrapper[4948]: I0120 20:47:22.069840 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:22 crc kubenswrapper[4948]: I0120 20:47:22.673651 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:22 crc kubenswrapper[4948]: I0120 20:47:22.742420 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tdjnk"] Jan 20 20:47:24 crc kubenswrapper[4948]: I0120 20:47:24.608011 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tdjnk" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerName="registry-server" containerID="cri-o://e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013" gracePeriod=2 Jan 20 20:47:24 crc kubenswrapper[4948]: E0120 20:47:24.803067 4948 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44b51a17_28e2_4c5d_8f86_1aa00c8156a5.slice/crio-conmon-e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44b51a17_28e2_4c5d_8f86_1aa00c8156a5.slice/crio-e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013.scope\": RecentStats: unable to find data in memory cache]" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.068989 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.143238 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-catalog-content\") pod \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.143299 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-utilities\") pod \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.143399 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6hmf\" (UniqueName: \"kubernetes.io/projected/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-kube-api-access-c6hmf\") pod \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\" (UID: \"44b51a17-28e2-4c5d-8f86-1aa00c8156a5\") " Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.144165 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-utilities" (OuterVolumeSpecName: "utilities") pod "44b51a17-28e2-4c5d-8f86-1aa00c8156a5" (UID: "44b51a17-28e2-4c5d-8f86-1aa00c8156a5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.166848 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-kube-api-access-c6hmf" (OuterVolumeSpecName: "kube-api-access-c6hmf") pod "44b51a17-28e2-4c5d-8f86-1aa00c8156a5" (UID: "44b51a17-28e2-4c5d-8f86-1aa00c8156a5"). InnerVolumeSpecName "kube-api-access-c6hmf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.186359 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "44b51a17-28e2-4c5d-8f86-1aa00c8156a5" (UID: "44b51a17-28e2-4c5d-8f86-1aa00c8156a5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.245197 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.245250 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.245265 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6hmf\" (UniqueName: \"kubernetes.io/projected/44b51a17-28e2-4c5d-8f86-1aa00c8156a5-kube-api-access-c6hmf\") on node \"crc\" DevicePath \"\"" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.617925 4948 generic.go:334] "Generic (PLEG): container finished" podID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerID="e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013" exitCode=0 Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.617981 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tdjnk" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.618008 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tdjnk" event={"ID":"44b51a17-28e2-4c5d-8f86-1aa00c8156a5","Type":"ContainerDied","Data":"e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013"} Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.618328 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tdjnk" event={"ID":"44b51a17-28e2-4c5d-8f86-1aa00c8156a5","Type":"ContainerDied","Data":"b44084ed7ecc2e91179949959f52801f4f3c383bb103c1f9cc238da17e600732"} Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.618353 4948 scope.go:117] "RemoveContainer" containerID="e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.650180 4948 scope.go:117] "RemoveContainer" containerID="bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.659725 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tdjnk"] Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.671204 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tdjnk"] Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.690967 4948 scope.go:117] "RemoveContainer" containerID="68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.730136 4948 scope.go:117] "RemoveContainer" containerID="e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013" Jan 20 20:47:25 crc kubenswrapper[4948]: E0120 20:47:25.732192 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013\": container with ID starting with e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013 not found: ID does not exist" containerID="e59e3615c91a378c065437417a04c7d961b91d7a7e688304cbcaea16191a1013" Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.732234 4948 
Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.732263 4948 scope.go:117] "RemoveContainer" containerID="bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471"
Jan 20 20:47:25 crc kubenswrapper[4948]: E0120 20:47:25.736240 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471\": container with ID starting with bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471 not found: ID does not exist" containerID="bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471"
Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.736299 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471"} err="failed to get container status \"bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471\": rpc error: code = NotFound desc = could not find container \"bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471\": container with ID starting with bfab6fde41644d1c6278e80e23c3e18d4078caa5541dbe08d399e728dac8b471 not found: ID does not exist"
Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.736320 4948 scope.go:117] "RemoveContainer" containerID="68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a"
Jan 20 20:47:25 crc kubenswrapper[4948]: E0120 20:47:25.736593 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a\": container with ID starting with 68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a not found: ID does not exist" containerID="68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a"
Jan 20 20:47:25 crc kubenswrapper[4948]: I0120 20:47:25.736640 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a"} err="failed to get container status \"68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a\": rpc error: code = NotFound desc = could not find container \"68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a\": container with ID starting with 68c1f36805de9db5b8c0dd549646208d2a3228c228521f881aed99852bc2c15a not found: ID does not exist"
Jan 20 20:47:26 crc kubenswrapper[4948]: I0120 20:47:26.585196 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" path="/var/lib/kubelet/pods/44b51a17-28e2-4c5d-8f86-1aa00c8156a5/volumes"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.495486 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x6dmv"]
Jan 20 20:47:37 crc kubenswrapper[4948]: E0120 20:47:37.497022 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerName="registry-server"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.497048 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerName="registry-server"
Jan 20 20:47:37 crc kubenswrapper[4948]: E0120 20:47:37.497080 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerName="extract-utilities"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.497092 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerName="extract-utilities"
Jan 20 20:47:37 crc kubenswrapper[4948]: E0120 20:47:37.497133 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerName="extract-content"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.497146 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerName="extract-content"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.497482 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="44b51a17-28e2-4c5d-8f86-1aa00c8156a5" containerName="registry-server"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.499884 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.507970 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x6dmv"]
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.635135 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzp9p\" (UniqueName: \"kubernetes.io/projected/51aec78e-7e7b-4418-b46e-b221f9b1594b-kube-api-access-kzp9p\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.635275 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-catalog-content\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.635460 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-utilities\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.736894 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzp9p\" (UniqueName: \"kubernetes.io/projected/51aec78e-7e7b-4418-b46e-b221f9b1594b-kube-api-access-kzp9p\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.737016 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-catalog-content\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.737132 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-utilities\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.737600 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-catalog-content\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.737951 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-utilities\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.767497 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzp9p\" (UniqueName: \"kubernetes.io/projected/51aec78e-7e7b-4418-b46e-b221f9b1594b-kube-api-access-kzp9p\") pod \"community-operators-x6dmv\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:37 crc kubenswrapper[4948]: I0120 20:47:37.828238 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:38 crc kubenswrapper[4948]: I0120 20:47:38.343274 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x6dmv"]
Jan 20 20:47:38 crc kubenswrapper[4948]: I0120 20:47:38.757018 4948 generic.go:334] "Generic (PLEG): container finished" podID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerID="9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9" exitCode=0
Jan 20 20:47:38 crc kubenswrapper[4948]: I0120 20:47:38.757077 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x6dmv" event={"ID":"51aec78e-7e7b-4418-b46e-b221f9b1594b","Type":"ContainerDied","Data":"9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9"}
Jan 20 20:47:38 crc kubenswrapper[4948]: I0120 20:47:38.757121 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x6dmv" event={"ID":"51aec78e-7e7b-4418-b46e-b221f9b1594b","Type":"ContainerStarted","Data":"6cb98dd8e63c52af0b0c35c1d8d521191a1a6f14650fa91a9a776332bff88b69"}
Jan 20 20:47:38 crc kubenswrapper[4948]: I0120 20:47:38.759159 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 20 20:47:40 crc kubenswrapper[4948]: I0120 20:47:40.775940 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x6dmv" event={"ID":"51aec78e-7e7b-4418-b46e-b221f9b1594b","Type":"ContainerStarted","Data":"71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9"}
Jan 20 20:47:41 crc kubenswrapper[4948]: I0120 20:47:41.787129 4948 generic.go:334] "Generic (PLEG): container finished" podID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerID="71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9" exitCode=0
Jan 20 20:47:41 crc kubenswrapper[4948]: I0120 20:47:41.787311 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x6dmv" event={"ID":"51aec78e-7e7b-4418-b46e-b221f9b1594b","Type":"ContainerDied","Data":"71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9"}
Jan 20 20:47:42 crc kubenswrapper[4948]: I0120 20:47:42.797785 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x6dmv" event={"ID":"51aec78e-7e7b-4418-b46e-b221f9b1594b","Type":"ContainerStarted","Data":"f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744"}
Jan 20 20:47:42 crc kubenswrapper[4948]: I0120 20:47:42.835545 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x6dmv" podStartSLOduration=2.337433118 podStartE2EDuration="5.835523317s" podCreationTimestamp="2026-01-20 20:47:37 +0000 UTC" firstStartedPulling="2026-01-20 20:47:38.758875926 +0000 UTC m=+3486.709600895" lastFinishedPulling="2026-01-20 20:47:42.256966125 +0000 UTC m=+3490.207691094" observedRunningTime="2026-01-20 20:47:42.827649672 +0000 UTC m=+3490.778374641" watchObservedRunningTime="2026-01-20 20:47:42.835523317 +0000 UTC m=+3490.786248286"
Jan 20 20:47:47 crc kubenswrapper[4948]: I0120 20:47:47.829420 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x6dmv"
Jan 20 20:47:47 crc kubenswrapper[4948]: I0120 20:47:47.830034 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x6dmv"
pod="openshift-marketplace/community-operators-x6dmv" Jan 20 20:47:48 crc kubenswrapper[4948]: I0120 20:47:48.895058 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-x6dmv" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="registry-server" probeResult="failure" output=< Jan 20 20:47:48 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 20:47:48 crc kubenswrapper[4948]: > Jan 20 20:47:57 crc kubenswrapper[4948]: I0120 20:47:57.880225 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x6dmv" Jan 20 20:47:57 crc kubenswrapper[4948]: I0120 20:47:57.943113 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x6dmv" Jan 20 20:47:58 crc kubenswrapper[4948]: I0120 20:47:58.127204 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x6dmv"] Jan 20 20:47:58 crc kubenswrapper[4948]: I0120 20:47:58.990912 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x6dmv" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="registry-server" containerID="cri-o://f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744" gracePeriod=2 Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.567565 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x6dmv" Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.572755 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzp9p\" (UniqueName: \"kubernetes.io/projected/51aec78e-7e7b-4418-b46e-b221f9b1594b-kube-api-access-kzp9p\") pod \"51aec78e-7e7b-4418-b46e-b221f9b1594b\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.572851 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-catalog-content\") pod \"51aec78e-7e7b-4418-b46e-b221f9b1594b\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.572888 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-utilities\") pod \"51aec78e-7e7b-4418-b46e-b221f9b1594b\" (UID: \"51aec78e-7e7b-4418-b46e-b221f9b1594b\") " Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.573681 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-utilities" (OuterVolumeSpecName: "utilities") pod "51aec78e-7e7b-4418-b46e-b221f9b1594b" (UID: "51aec78e-7e7b-4418-b46e-b221f9b1594b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.574169 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.591657 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51aec78e-7e7b-4418-b46e-b221f9b1594b-kube-api-access-kzp9p" (OuterVolumeSpecName: "kube-api-access-kzp9p") pod "51aec78e-7e7b-4418-b46e-b221f9b1594b" (UID: "51aec78e-7e7b-4418-b46e-b221f9b1594b"). InnerVolumeSpecName "kube-api-access-kzp9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.676149 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzp9p\" (UniqueName: \"kubernetes.io/projected/51aec78e-7e7b-4418-b46e-b221f9b1594b-kube-api-access-kzp9p\") on node \"crc\" DevicePath \"\"" Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.676907 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "51aec78e-7e7b-4418-b46e-b221f9b1594b" (UID: "51aec78e-7e7b-4418-b46e-b221f9b1594b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:47:59 crc kubenswrapper[4948]: I0120 20:47:59.778631 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51aec78e-7e7b-4418-b46e-b221f9b1594b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.002339 4948 generic.go:334] "Generic (PLEG): container finished" podID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerID="f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744" exitCode=0 Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.002383 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x6dmv" event={"ID":"51aec78e-7e7b-4418-b46e-b221f9b1594b","Type":"ContainerDied","Data":"f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744"} Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.002408 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x6dmv" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.002438 4948 scope.go:117] "RemoveContainer" containerID="f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.002417 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x6dmv" event={"ID":"51aec78e-7e7b-4418-b46e-b221f9b1594b","Type":"ContainerDied","Data":"6cb98dd8e63c52af0b0c35c1d8d521191a1a6f14650fa91a9a776332bff88b69"} Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.035590 4948 scope.go:117] "RemoveContainer" containerID="71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.046604 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x6dmv"] Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.066767 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x6dmv"] Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.075326 4948 scope.go:117] "RemoveContainer" containerID="9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.118941 4948 scope.go:117] "RemoveContainer" containerID="f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744" Jan 20 20:48:00 crc kubenswrapper[4948]: E0120 20:48:00.120993 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744\": container with ID starting with f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744 not found: ID does not exist" containerID="f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.121041 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744"} err="failed to get container status \"f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744\": rpc error: code = NotFound desc = could not find container \"f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744\": container with ID starting with f73d0f452d9fd81f9c3a235e1ed07962af19c39ef355eb2fab6f7061d0e82744 not found: ID does not exist" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.121071 4948 scope.go:117] "RemoveContainer" containerID="71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9" Jan 20 20:48:00 crc kubenswrapper[4948]: E0120 20:48:00.121352 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9\": container with ID starting with 71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9 not found: ID does not exist" containerID="71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.121375 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9"} err="failed to get container status \"71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9\": rpc error: code = NotFound desc = could not find 
container \"71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9\": container with ID starting with 71af274b00373708ac1ffe7ac092a76ad9333824ae565ae920b96880e084c4c9 not found: ID does not exist" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.121389 4948 scope.go:117] "RemoveContainer" containerID="9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9" Jan 20 20:48:00 crc kubenswrapper[4948]: E0120 20:48:00.121621 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9\": container with ID starting with 9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9 not found: ID does not exist" containerID="9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.121637 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9"} err="failed to get container status \"9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9\": rpc error: code = NotFound desc = could not find container \"9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9\": container with ID starting with 9af713f4510a0e4d438ac057ff617ca74f96a4dd4a981b4e0fe593da115c15d9 not found: ID does not exist" Jan 20 20:48:00 crc kubenswrapper[4948]: I0120 20:48:00.580272 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" path="/var/lib/kubelet/pods/51aec78e-7e7b-4418-b46e-b221f9b1594b/volumes" Jan 20 20:48:50 crc kubenswrapper[4948]: I0120 20:48:50.250527 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:48:50 crc kubenswrapper[4948]: I0120 20:48:50.252601 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:48:50 crc kubenswrapper[4948]: I0120 20:48:50.477287 4948 generic.go:334] "Generic (PLEG): container finished" podID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerID="52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2" exitCode=0 Jan 20 20:48:50 crc kubenswrapper[4948]: I0120 20:48:50.477592 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-7qrk8/must-gather-64jzl" event={"ID":"337d06be-7739-418e-a1ec-9c1e0936cf6b","Type":"ContainerDied","Data":"52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2"} Jan 20 20:48:50 crc kubenswrapper[4948]: I0120 20:48:50.478262 4948 scope.go:117] "RemoveContainer" containerID="52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2" Jan 20 20:48:51 crc kubenswrapper[4948]: I0120 20:48:51.343063 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-7qrk8_must-gather-64jzl_337d06be-7739-418e-a1ec-9c1e0936cf6b/gather/0.log" Jan 20 20:48:59 crc kubenswrapper[4948]: I0120 20:48:59.774219 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-must-gather-7qrk8/must-gather-64jzl"] Jan 20 20:48:59 crc kubenswrapper[4948]: I0120 20:48:59.775849 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-7qrk8/must-gather-64jzl" podUID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerName="copy" containerID="cri-o://8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a" gracePeriod=2 Jan 20 20:48:59 crc kubenswrapper[4948]: I0120 20:48:59.796307 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-7qrk8/must-gather-64jzl"] Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.287613 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-7qrk8_must-gather-64jzl_337d06be-7739-418e-a1ec-9c1e0936cf6b/copy/0.log" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.288585 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.340773 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/337d06be-7739-418e-a1ec-9c1e0936cf6b-must-gather-output\") pod \"337d06be-7739-418e-a1ec-9c1e0936cf6b\" (UID: \"337d06be-7739-418e-a1ec-9c1e0936cf6b\") " Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.340835 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bq8kj\" (UniqueName: \"kubernetes.io/projected/337d06be-7739-418e-a1ec-9c1e0936cf6b-kube-api-access-bq8kj\") pod \"337d06be-7739-418e-a1ec-9c1e0936cf6b\" (UID: \"337d06be-7739-418e-a1ec-9c1e0936cf6b\") " Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.351854 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/337d06be-7739-418e-a1ec-9c1e0936cf6b-kube-api-access-bq8kj" (OuterVolumeSpecName: "kube-api-access-bq8kj") pod "337d06be-7739-418e-a1ec-9c1e0936cf6b" (UID: "337d06be-7739-418e-a1ec-9c1e0936cf6b"). InnerVolumeSpecName "kube-api-access-bq8kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.443200 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bq8kj\" (UniqueName: \"kubernetes.io/projected/337d06be-7739-418e-a1ec-9c1e0936cf6b-kube-api-access-bq8kj\") on node \"crc\" DevicePath \"\"" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.532413 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/337d06be-7739-418e-a1ec-9c1e0936cf6b-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "337d06be-7739-418e-a1ec-9c1e0936cf6b" (UID: "337d06be-7739-418e-a1ec-9c1e0936cf6b"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.544929 4948 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/337d06be-7739-418e-a1ec-9c1e0936cf6b-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.583697 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="337d06be-7739-418e-a1ec-9c1e0936cf6b" path="/var/lib/kubelet/pods/337d06be-7739-418e-a1ec-9c1e0936cf6b/volumes" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.583739 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-7qrk8_must-gather-64jzl_337d06be-7739-418e-a1ec-9c1e0936cf6b/copy/0.log" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.584603 4948 generic.go:334] "Generic (PLEG): container finished" podID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerID="8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a" exitCode=143 Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.584716 4948 scope.go:117] "RemoveContainer" containerID="8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.584763 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-7qrk8/must-gather-64jzl" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.605685 4948 scope.go:117] "RemoveContainer" containerID="52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.683415 4948 scope.go:117] "RemoveContainer" containerID="8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a" Jan 20 20:49:00 crc kubenswrapper[4948]: E0120 20:49:00.683926 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a\": container with ID starting with 8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a not found: ID does not exist" containerID="8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.683972 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a"} err="failed to get container status \"8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a\": rpc error: code = NotFound desc = could not find container \"8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a\": container with ID starting with 8bb102ab5ecbf2e13963e065a1a8569ca11e65aaeabcaea5536f30608a779a5a not found: ID does not exist" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 20:49:00.684001 4948 scope.go:117] "RemoveContainer" containerID="52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2" Jan 20 20:49:00 crc kubenswrapper[4948]: E0120 20:49:00.684290 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2\": container with ID starting with 52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2 not found: ID does not exist" containerID="52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2" Jan 20 20:49:00 crc kubenswrapper[4948]: I0120 
20:49:00.684312 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2"} err="failed to get container status \"52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2\": rpc error: code = NotFound desc = could not find container \"52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2\": container with ID starting with 52dcd37eb39af2bb8b18a7d7c33beb2dcb2351ad235fe002e47ec2e91aba43a2 not found: ID does not exist" Jan 20 20:49:20 crc kubenswrapper[4948]: I0120 20:49:20.249620 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:49:20 crc kubenswrapper[4948]: I0120 20:49:20.261337 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:49:50 crc kubenswrapper[4948]: I0120 20:49:50.445599 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:49:50 crc kubenswrapper[4948]: I0120 20:49:50.446231 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:49:50 crc kubenswrapper[4948]: I0120 20:49:50.446290 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 20:49:50 crc kubenswrapper[4948]: I0120 20:49:50.446987 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 20:49:50 crc kubenswrapper[4948]: I0120 20:49:50.447045 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" gracePeriod=600 Jan 20 20:49:50 crc kubenswrapper[4948]: E0120 20:49:50.576742 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" 
podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:49:51 crc kubenswrapper[4948]: I0120 20:49:51.072674 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" exitCode=0 Jan 20 20:49:51 crc kubenswrapper[4948]: I0120 20:49:51.073143 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d"} Jan 20 20:49:51 crc kubenswrapper[4948]: I0120 20:49:51.073402 4948 scope.go:117] "RemoveContainer" containerID="a903b81d54eb3dba7835451af8d6e673d879722e4e0ac1bd55e1191b899c1340" Jan 20 20:49:51 crc kubenswrapper[4948]: I0120 20:49:51.076878 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:49:51 crc kubenswrapper[4948]: E0120 20:49:51.078026 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:50:04 crc kubenswrapper[4948]: I0120 20:50:04.569773 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:50:04 crc kubenswrapper[4948]: E0120 20:50:04.571872 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:50:19 crc kubenswrapper[4948]: I0120 20:50:19.570309 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:50:19 crc kubenswrapper[4948]: E0120 20:50:19.572529 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:50:34 crc kubenswrapper[4948]: I0120 20:50:34.569882 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:50:34 crc kubenswrapper[4948]: E0120 20:50:34.570582 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 
20:50:41.548843 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pz7tb/must-gather-749j8"] Jan 20 20:50:41 crc kubenswrapper[4948]: E0120 20:50:41.549764 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="extract-utilities" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.549781 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="extract-utilities" Jan 20 20:50:41 crc kubenswrapper[4948]: E0120 20:50:41.549809 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerName="copy" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.549815 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerName="copy" Jan 20 20:50:41 crc kubenswrapper[4948]: E0120 20:50:41.549831 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="registry-server" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.549838 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="registry-server" Jan 20 20:50:41 crc kubenswrapper[4948]: E0120 20:50:41.549846 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="extract-content" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.549851 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="extract-content" Jan 20 20:50:41 crc kubenswrapper[4948]: E0120 20:50:41.549861 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerName="gather" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.549866 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerName="gather" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.550101 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerName="gather" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.550113 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="337d06be-7739-418e-a1ec-9c1e0936cf6b" containerName="copy" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.550129 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="51aec78e-7e7b-4418-b46e-b221f9b1594b" containerName="registry-server" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.551114 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.567601 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pz7tb"/"kube-root-ca.crt" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.571054 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pz7tb"/"openshift-service-ca.crt" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.614181 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pz7tb/must-gather-749j8"] Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.737183 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffsk9\" (UniqueName: \"kubernetes.io/projected/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-kube-api-access-ffsk9\") pod \"must-gather-749j8\" (UID: \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\") " pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.737460 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-must-gather-output\") pod \"must-gather-749j8\" (UID: \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\") " pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.838742 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffsk9\" (UniqueName: \"kubernetes.io/projected/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-kube-api-access-ffsk9\") pod \"must-gather-749j8\" (UID: \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\") " pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.838833 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-must-gather-output\") pod \"must-gather-749j8\" (UID: \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\") " pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.839429 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-must-gather-output\") pod \"must-gather-749j8\" (UID: \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\") " pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.859582 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffsk9\" (UniqueName: \"kubernetes.io/projected/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-kube-api-access-ffsk9\") pod \"must-gather-749j8\" (UID: \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\") " pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 20:50:41 crc kubenswrapper[4948]: I0120 20:50:41.868339 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 20:50:42 crc kubenswrapper[4948]: I0120 20:50:42.414328 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pz7tb/must-gather-749j8"] Jan 20 20:50:42 crc kubenswrapper[4948]: I0120 20:50:42.615402 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/must-gather-749j8" event={"ID":"c84f95ac-5d9f-467b-90fa-fa7da9b2c851","Type":"ContainerStarted","Data":"929a9f1bb40b153fe92185fbe646c62b76342d03ce9cdad13d2dd7b623ae20d7"} Jan 20 20:50:43 crc kubenswrapper[4948]: I0120 20:50:43.626549 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/must-gather-749j8" event={"ID":"c84f95ac-5d9f-467b-90fa-fa7da9b2c851","Type":"ContainerStarted","Data":"ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27"} Jan 20 20:50:43 crc kubenswrapper[4948]: I0120 20:50:43.627039 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/must-gather-749j8" event={"ID":"c84f95ac-5d9f-467b-90fa-fa7da9b2c851","Type":"ContainerStarted","Data":"29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd"} Jan 20 20:50:43 crc kubenswrapper[4948]: I0120 20:50:43.645027 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pz7tb/must-gather-749j8" podStartSLOduration=2.64499501 podStartE2EDuration="2.64499501s" podCreationTimestamp="2026-01-20 20:50:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:50:43.641014396 +0000 UTC m=+3671.591739375" watchObservedRunningTime="2026-01-20 20:50:43.64499501 +0000 UTC m=+3671.595719979" Jan 20 20:50:46 crc kubenswrapper[4948]: I0120 20:50:46.819345 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-cbs49"] Jan 20 20:50:46 crc kubenswrapper[4948]: I0120 20:50:46.820863 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:50:46 crc kubenswrapper[4948]: I0120 20:50:46.827432 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pz7tb"/"default-dockercfg-hrmzv" Jan 20 20:50:46 crc kubenswrapper[4948]: I0120 20:50:46.854619 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1940606a-a63d-458a-b74a-0aec9e06d727-host\") pod \"crc-debug-cbs49\" (UID: \"1940606a-a63d-458a-b74a-0aec9e06d727\") " pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:50:46 crc kubenswrapper[4948]: I0120 20:50:46.966225 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrrld\" (UniqueName: \"kubernetes.io/projected/1940606a-a63d-458a-b74a-0aec9e06d727-kube-api-access-hrrld\") pod \"crc-debug-cbs49\" (UID: \"1940606a-a63d-458a-b74a-0aec9e06d727\") " pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:50:46 crc kubenswrapper[4948]: I0120 20:50:46.966638 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1940606a-a63d-458a-b74a-0aec9e06d727-host\") pod \"crc-debug-cbs49\" (UID: \"1940606a-a63d-458a-b74a-0aec9e06d727\") " pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:50:46 crc kubenswrapper[4948]: I0120 20:50:46.966903 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1940606a-a63d-458a-b74a-0aec9e06d727-host\") pod \"crc-debug-cbs49\" (UID: \"1940606a-a63d-458a-b74a-0aec9e06d727\") " pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:50:47 crc kubenswrapper[4948]: I0120 20:50:47.069080 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrrld\" (UniqueName: \"kubernetes.io/projected/1940606a-a63d-458a-b74a-0aec9e06d727-kube-api-access-hrrld\") pod \"crc-debug-cbs49\" (UID: \"1940606a-a63d-458a-b74a-0aec9e06d727\") " pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:50:47 crc kubenswrapper[4948]: I0120 20:50:47.091654 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrrld\" (UniqueName: \"kubernetes.io/projected/1940606a-a63d-458a-b74a-0aec9e06d727-kube-api-access-hrrld\") pod \"crc-debug-cbs49\" (UID: \"1940606a-a63d-458a-b74a-0aec9e06d727\") " pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:50:47 crc kubenswrapper[4948]: I0120 20:50:47.138658 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:50:47 crc kubenswrapper[4948]: W0120 20:50:47.179780 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1940606a_a63d_458a_b74a_0aec9e06d727.slice/crio-51291f6ef56c054030bfeb19cb13e97cba155bbcc81c9b220960498f3db112b0 WatchSource:0}: Error finding container 51291f6ef56c054030bfeb19cb13e97cba155bbcc81c9b220960498f3db112b0: Status 404 returned error can't find the container with id 51291f6ef56c054030bfeb19cb13e97cba155bbcc81c9b220960498f3db112b0 Jan 20 20:50:47 crc kubenswrapper[4948]: I0120 20:50:47.669686 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/crc-debug-cbs49" event={"ID":"1940606a-a63d-458a-b74a-0aec9e06d727","Type":"ContainerStarted","Data":"2bf3ac32145bf900f863a520a4031022443810123d405d2b29917d06e77ab513"} Jan 20 20:50:47 crc kubenswrapper[4948]: I0120 20:50:47.670319 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/crc-debug-cbs49" event={"ID":"1940606a-a63d-458a-b74a-0aec9e06d727","Type":"ContainerStarted","Data":"51291f6ef56c054030bfeb19cb13e97cba155bbcc81c9b220960498f3db112b0"} Jan 20 20:50:47 crc kubenswrapper[4948]: I0120 20:50:47.687876 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pz7tb/crc-debug-cbs49" podStartSLOduration=1.687859593 podStartE2EDuration="1.687859593s" podCreationTimestamp="2026-01-20 20:50:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 20:50:47.683992623 +0000 UTC m=+3675.634717592" watchObservedRunningTime="2026-01-20 20:50:47.687859593 +0000 UTC m=+3675.638584562" Jan 20 20:50:49 crc kubenswrapper[4948]: I0120 20:50:49.571033 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:50:49 crc kubenswrapper[4948]: E0120 20:50:49.571835 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:50:49 crc kubenswrapper[4948]: I0120 20:50:49.909451 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-869694d5d6-n6ftn_7eca20c7-5485-4fce-9c6e-d3bd3943adc1/barbican-api-log/0.log" Jan 20 20:50:49 crc kubenswrapper[4948]: I0120 20:50:49.916282 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-869694d5d6-n6ftn_7eca20c7-5485-4fce-9c6e-d3bd3943adc1/barbican-api/0.log" Jan 20 20:50:49 crc kubenswrapper[4948]: I0120 20:50:49.991497 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-88477f558-k4bcx_e71b28b0-54d9-48ce-9442-412fbdd5fe0f/barbican-keystone-listener-log/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.002762 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-88477f558-k4bcx_e71b28b0-54d9-48ce-9442-412fbdd5fe0f/barbican-keystone-listener/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.021213 4948 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_barbican-worker-6d76c4759-rj9ns_9b73cf57-92bd-47c5-8f21-ffcc9438594b/barbican-worker-log/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.028090 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6d76c4759-rj9ns_9b73cf57-92bd-47c5-8f21-ffcc9438594b/barbican-worker/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.098782 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-6jwwn_11f8f855-5031-4c77-88c5-07f606419c1f/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.133678 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ad8829d7-3d58-4752-9f62-83663e2dad23/ceilometer-central-agent/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.155001 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ad8829d7-3d58-4752-9f62-83663e2dad23/ceilometer-notification-agent/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.160354 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ad8829d7-3d58-4752-9f62-83663e2dad23/sg-core/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.171956 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ad8829d7-3d58-4752-9f62-83663e2dad23/proxy-httpd/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.189389 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bf15b74a-2849-4970-87a3-83d7e1b788ba/cinder-api-log/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.244636 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_bf15b74a-2849-4970-87a3-83d7e1b788ba/cinder-api/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.299399 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e95290f6-0498-4bfa-8653-3a53edf4f01f/cinder-scheduler/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.336887 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e95290f6-0498-4bfa-8653-3a53edf4f01f/probe/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.359776 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-52fgv_88dba5f2-ff1f-420f-a1cf-e78fd5512d44/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.376858 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-2446g_c43c5ed8-ee74-481a-9b89-30845f8380b8/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.439036 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-f4d4c4b7-5pcpw_fb7020ef-1f09-4241-9001-eb628c16fd07/dnsmasq-dns/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.443904 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-f4d4c4b7-5pcpw_fb7020ef-1f09-4241-9001-eb628c16fd07/init/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.479090 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-x77kc_bdfde737-ff95-41e6-a124-accfa3f24d58/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.499842 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf/glance-log/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.521446 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c35f0ddf-3894-4ab3-bfa1-d55fbc83a4bf/glance-httpd/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.532032 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2f39439c-442b-407e-9b64-ed1a23e6a97c/glance-log/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.553984 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2f39439c-442b-407e-9b64-ed1a23e6a97c/glance-httpd/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.844840 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-67dd67cb9b-9w4wk_4d2c0905-915e-4504-8454-ee3500220ab3/horizon-log/0.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.951800 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-67dd67cb9b-9w4wk_4d2c0905-915e-4504-8454-ee3500220ab3/horizon/2.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.961492 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-67dd67cb9b-9w4wk_4d2c0905-915e-4504-8454-ee3500220ab3/horizon/1.log" Jan 20 20:50:50 crc kubenswrapper[4948]: I0120 20:50:50.988911 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-wb6fq_cf7abc7a-4446-4807-af6e-96711d710f9e/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.013362 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-gbbgp_a036dc78-f9f1-467a-b272-a45b9280bc99/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.130509 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7c45b45594-rdsj9_413e45d6-d022-4586-82cc-228d8431dce4/keystone-api/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.139108 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_3c4b94fb-bdd9-4bcb-b9e3-b75aac1d4b0f/kube-state-metrics/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.223260 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-5zcz2_c6149a97-b5c3-4ec7-8b50-fc3a77843b48/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.517657 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_d6257c47-078f-4d41-942c-45d7e57b8c15/memcached/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.553620 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-79d47bbd4f-rpj54_4005ab42-8a7a-4951-ba75-b1f7a3d2a063/neutron-api/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.691994 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_neutron-79d47bbd4f-rpj54_4005ab42-8a7a-4951-ba75-b1f7a3d2a063/neutron-httpd/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.717412 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-m7kn2_a14c4acd-7573-4e72-9ab4-c1263844f59e/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:51 crc kubenswrapper[4948]: I0120 20:50:51.824563 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_0bef1366-a94a-4d51-a5b4-53fe9a86a4d9/nova-api-log/0.log" Jan 20 20:50:52 crc kubenswrapper[4948]: I0120 20:50:52.163474 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_0bef1366-a94a-4d51-a5b4-53fe9a86a4d9/nova-api-api/0.log" Jan 20 20:50:52 crc kubenswrapper[4948]: I0120 20:50:52.297183 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_8c56770f-e8ae-4540-9bb0-34123665502e/nova-cell0-conductor-conductor/0.log" Jan 20 20:50:52 crc kubenswrapper[4948]: I0120 20:50:52.408396 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_d3f5f7e6-247c-41c7-877c-f43cf1b1f412/nova-cell1-conductor-conductor/0.log" Jan 20 20:50:52 crc kubenswrapper[4948]: I0120 20:50:52.518903 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_8dc0455c-7835-456a-b537-34836da2cdff/nova-cell1-novncproxy-novncproxy/0.log" Jan 20 20:50:52 crc kubenswrapper[4948]: I0120 20:50:52.597258 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-x5v8p_4bb85740-d63d-4363-91af-c07eecf6ab45/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:52 crc kubenswrapper[4948]: I0120 20:50:52.689403 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_405260b6-bbf5-4d0b-8a81-686340252185/nova-metadata-log/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.524405 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_405260b6-bbf5-4d0b-8a81-686340252185/nova-metadata-metadata/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.631180 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_7d52d1e7-1dc7-4341-b483-da6863189804/nova-scheduler-scheduler/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.659511 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_68260cc0-7bcb-4582-8154-60bbcdfbcf04/galera/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.674943 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_68260cc0-7bcb-4582-8154-60bbcdfbcf04/mysql-bootstrap/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.707820 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_67ccceb8-ab3c-4304-9336-8938675a1012/galera/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.724957 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_67ccceb8-ab3c-4304-9336-8938675a1012/mysql-bootstrap/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.743059 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_d1222f27-af2a-46fd-a296-37bdb8db4486/openstackclient/0.log" Jan 20 20:50:53 crc 
kubenswrapper[4948]: I0120 20:50:53.766852 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-hpg27_46328967-e69a-4d46-86d6-ba1af248c8f2/ovn-controller/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.774969 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-g8dbf_3bdd9991-773b-4709-a6e1-426c1fc89d23/openstack-network-exporter/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.797959 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dgkh9_7e8635e1-cc17-4a2e-9b45-b76043df05d4/ovsdb-server/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.809970 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dgkh9_7e8635e1-cc17-4a2e-9b45-b76043df05d4/ovs-vswitchd/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.817762 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dgkh9_7e8635e1-cc17-4a2e-9b45-b76043df05d4/ovsdb-server-init/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.854776 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-7tm27_ee6e6079-b341-4648-b640-da45d2f27ed5/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.867195 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8beae232-ff35-4a9c-9f68-0d9c20e65c67/ovn-northd/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.875596 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_8beae232-ff35-4a9c-9f68-0d9c20e65c67/openstack-network-exporter/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.894821 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_db2122b2-3a50-4587-944d-ca8aa51882ab/ovsdbserver-nb/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.900973 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_db2122b2-3a50-4587-944d-ca8aa51882ab/openstack-network-exporter/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.914047 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_25b56954-2973-439d-a473-019d32e6ec0c/ovsdbserver-sb/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.919533 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_25b56954-2973-439d-a473-019d32e6ec0c/openstack-network-exporter/0.log" Jan 20 20:50:53 crc kubenswrapper[4948]: I0120 20:50:53.970497 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6965b8b8b4-5f4wt_923c67b1-e9b6-4c67-86aa-96dc2760ba19/placement-log/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.021393 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6965b8b8b4-5f4wt_923c67b1-e9b6-4c67-86aa-96dc2760ba19/placement-api/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.043962 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_899d2813-4685-40b7-ba95-60d3126802a2/rabbitmq/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.052700 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_899d2813-4685-40b7-ba95-60d3126802a2/setup-container/0.log" Jan 20 
20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.074362 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8c30b121-20f6-47ad-89e0-ce511df4efb7/rabbitmq/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.084768 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_8c30b121-20f6-47ad-89e0-ce511df4efb7/setup-container/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.100528 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-glx8p_c2713e4e-89b8-4d59-9a34-947cd7af2e0e/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.110888 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-2bxbf_cd1a8ab5-15f0-4194-bb29-4bd56b856c33/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.127812 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-482zl_5a4fea5f-1b46-482d-a956-9307be45284c/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.141235 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-kgkms_1a69232e-a7d3-43f7-a730-b21ffbf62e38/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.153652 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-spfvx_fc3ad5c4-f353-42b4-8266-6180aae6f48f/ssh-known-hosts-edpm-deployment/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.370036 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-646f4c575-wzbtn_e0464310-34e8-4747-9a37-6a9ce764a73a/proxy-httpd/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.417194 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-646f4c575-wzbtn_e0464310-34e8-4747-9a37-6a9ce764a73a/proxy-server/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.428800 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-ctgvx_ce6ef66a-e0b9-4dbf-9c1b-262e952e9845/swift-ring-rebalance/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.478672 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/account-server/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.495812 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/account-replicator/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.504732 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/account-auditor/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.516831 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/account-reaper/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.567493 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/container-server/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: 
I0120 20:50:54.600766 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/container-replicator/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.607118 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/container-auditor/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.622898 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/container-updater/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.667204 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-server/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.687829 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-replicator/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.725091 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-auditor/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.735791 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-updater/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.748824 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/object-expirer/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.766184 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/rsync/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.774459 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_253a8193-904e-4f62-adbe-597b97b4fd30/swift-recon-cron/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.859207 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-ht82b_28bbc15a-1085-4cbd-9dac-0180526816bc/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.882939 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_84db0de1-b0d6-4a7f-88d8-6470a493ef78/tempest-tests-tempest-tests-runner/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.900266 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_5db0e8eb-349c-41d5-96d3-9025f96d2869/test-operator-logs-container/0.log" Jan 20 20:50:54 crc kubenswrapper[4948]: I0120 20:50:54.922652 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-rv7pg_ada055ea-6aa5-4e75-ad5b-4caec7647608/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 20 20:51:03 crc kubenswrapper[4948]: I0120 20:51:03.570124 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:51:03 crc kubenswrapper[4948]: E0120 20:51:03.571048 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 
5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:51:11 crc kubenswrapper[4948]: I0120 20:51:11.390389 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/controller/0.log" Jan 20 20:51:11 crc kubenswrapper[4948]: I0120 20:51:11.397266 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/kube-rbac-proxy/0.log" Jan 20 20:51:11 crc kubenswrapper[4948]: I0120 20:51:11.425845 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/controller/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.741890 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.749497 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/reloader/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.758963 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr-metrics/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.766167 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.773657 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy-frr/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.780273 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-frr-files/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.789588 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-reloader/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.798262 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-metrics/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.810481 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-mxgmc_06d4b8b1-3c5f-4736-9492-bc33db43f510/frr-k8s-webhook-server/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.838532 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7998c69bcc-rkwld_a422b9d2-2fe8-485a-a7c7-fb0fa96706c9/manager/0.log" Jan 20 20:51:12 crc kubenswrapper[4948]: I0120 20:51:12.848999 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-989f8776d-mst22_3eb6ce14-f5fb-4e93-8f16-d4b0eec67237/webhook-server/0.log" Jan 20 20:51:13 crc kubenswrapper[4948]: I0120 20:51:13.227856 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/speaker/0.log" Jan 20 20:51:13 crc kubenswrapper[4948]: I0120 20:51:13.233290 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/kube-rbac-proxy/0.log" Jan 20 20:51:17 crc kubenswrapper[4948]: I0120 20:51:17.569553 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:51:17 crc kubenswrapper[4948]: E0120 20:51:17.570398 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:51:22 crc kubenswrapper[4948]: I0120 20:51:22.024375 4948 generic.go:334] "Generic (PLEG): container finished" podID="1940606a-a63d-458a-b74a-0aec9e06d727" containerID="2bf3ac32145bf900f863a520a4031022443810123d405d2b29917d06e77ab513" exitCode=0 Jan 20 20:51:22 crc kubenswrapper[4948]: I0120 20:51:22.024456 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/crc-debug-cbs49" event={"ID":"1940606a-a63d-458a-b74a-0aec9e06d727","Type":"ContainerDied","Data":"2bf3ac32145bf900f863a520a4031022443810123d405d2b29917d06e77ab513"} Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.145062 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.179783 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-cbs49"] Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.188149 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-cbs49"] Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.257604 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hrrld\" (UniqueName: \"kubernetes.io/projected/1940606a-a63d-458a-b74a-0aec9e06d727-kube-api-access-hrrld\") pod \"1940606a-a63d-458a-b74a-0aec9e06d727\" (UID: \"1940606a-a63d-458a-b74a-0aec9e06d727\") " Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.258054 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1940606a-a63d-458a-b74a-0aec9e06d727-host\") pod \"1940606a-a63d-458a-b74a-0aec9e06d727\" (UID: \"1940606a-a63d-458a-b74a-0aec9e06d727\") " Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.258163 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1940606a-a63d-458a-b74a-0aec9e06d727-host" (OuterVolumeSpecName: "host") pod "1940606a-a63d-458a-b74a-0aec9e06d727" (UID: "1940606a-a63d-458a-b74a-0aec9e06d727"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.258749 4948 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1940606a-a63d-458a-b74a-0aec9e06d727-host\") on node \"crc\" DevicePath \"\"" Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.264044 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1940606a-a63d-458a-b74a-0aec9e06d727-kube-api-access-hrrld" (OuterVolumeSpecName: "kube-api-access-hrrld") pod "1940606a-a63d-458a-b74a-0aec9e06d727" (UID: "1940606a-a63d-458a-b74a-0aec9e06d727"). InnerVolumeSpecName "kube-api-access-hrrld". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:51:23 crc kubenswrapper[4948]: I0120 20:51:23.360226 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hrrld\" (UniqueName: \"kubernetes.io/projected/1940606a-a63d-458a-b74a-0aec9e06d727-kube-api-access-hrrld\") on node \"crc\" DevicePath \"\"" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.049550 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51291f6ef56c054030bfeb19cb13e97cba155bbcc81c9b220960498f3db112b0" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.049644 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-cbs49" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.382558 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-gvkkr"] Jan 20 20:51:24 crc kubenswrapper[4948]: E0120 20:51:24.382998 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1940606a-a63d-458a-b74a-0aec9e06d727" containerName="container-00" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.383012 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1940606a-a63d-458a-b74a-0aec9e06d727" containerName="container-00" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.383190 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1940606a-a63d-458a-b74a-0aec9e06d727" containerName="container-00" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.383789 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.386151 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pz7tb"/"default-dockercfg-hrmzv" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.487812 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztzb9\" (UniqueName: \"kubernetes.io/projected/3c3306fd-2d96-405d-89bc-566751e82c77-kube-api-access-ztzb9\") pod \"crc-debug-gvkkr\" (UID: \"3c3306fd-2d96-405d-89bc-566751e82c77\") " pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.488146 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3c3306fd-2d96-405d-89bc-566751e82c77-host\") pod \"crc-debug-gvkkr\" (UID: \"3c3306fd-2d96-405d-89bc-566751e82c77\") " pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.579875 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1940606a-a63d-458a-b74a-0aec9e06d727" path="/var/lib/kubelet/pods/1940606a-a63d-458a-b74a-0aec9e06d727/volumes" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.590064 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3c3306fd-2d96-405d-89bc-566751e82c77-host\") pod \"crc-debug-gvkkr\" (UID: \"3c3306fd-2d96-405d-89bc-566751e82c77\") " pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.590136 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3c3306fd-2d96-405d-89bc-566751e82c77-host\") pod \"crc-debug-gvkkr\" (UID: \"3c3306fd-2d96-405d-89bc-566751e82c77\") " pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.590218 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztzb9\" (UniqueName: \"kubernetes.io/projected/3c3306fd-2d96-405d-89bc-566751e82c77-kube-api-access-ztzb9\") pod \"crc-debug-gvkkr\" (UID: \"3c3306fd-2d96-405d-89bc-566751e82c77\") " pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.607050 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztzb9\" (UniqueName: \"kubernetes.io/projected/3c3306fd-2d96-405d-89bc-566751e82c77-kube-api-access-ztzb9\") pod \"crc-debug-gvkkr\" (UID: \"3c3306fd-2d96-405d-89bc-566751e82c77\") " pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:24 crc kubenswrapper[4948]: I0120 20:51:24.701487 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:25 crc kubenswrapper[4948]: I0120 20:51:25.082382 4948 generic.go:334] "Generic (PLEG): container finished" podID="3c3306fd-2d96-405d-89bc-566751e82c77" containerID="7efba51c96b5facd7685081e449cc1e750ce4d5142a3d809021bdd3cb8da454d" exitCode=0 Jan 20 20:51:25 crc kubenswrapper[4948]: I0120 20:51:25.082446 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" event={"ID":"3c3306fd-2d96-405d-89bc-566751e82c77","Type":"ContainerDied","Data":"7efba51c96b5facd7685081e449cc1e750ce4d5142a3d809021bdd3cb8da454d"} Jan 20 20:51:25 crc kubenswrapper[4948]: I0120 20:51:25.082484 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" event={"ID":"3c3306fd-2d96-405d-89bc-566751e82c77","Type":"ContainerStarted","Data":"85813c1f37030da300c9b7170f46a9cf605b8894fc1120b19c538d9504dd2cd5"} Jan 20 20:51:25 crc kubenswrapper[4948]: I0120 20:51:25.662195 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-gvkkr"] Jan 20 20:51:25 crc kubenswrapper[4948]: I0120 20:51:25.671556 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-gvkkr"] Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.198039 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/extract/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.208145 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.212072 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/util/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.220240 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/pull/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.298134 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-6vfzk_ef41048d-32d0-4b45-98ef-181e13e62c26/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.328081 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3c3306fd-2d96-405d-89bc-566751e82c77-host\") pod \"3c3306fd-2d96-405d-89bc-566751e82c77\" (UID: \"3c3306fd-2d96-405d-89bc-566751e82c77\") " Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.328175 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c3306fd-2d96-405d-89bc-566751e82c77-host" (OuterVolumeSpecName: "host") pod "3c3306fd-2d96-405d-89bc-566751e82c77" (UID: "3c3306fd-2d96-405d-89bc-566751e82c77"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.328181 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztzb9\" (UniqueName: \"kubernetes.io/projected/3c3306fd-2d96-405d-89bc-566751e82c77-kube-api-access-ztzb9\") pod \"3c3306fd-2d96-405d-89bc-566751e82c77\" (UID: \"3c3306fd-2d96-405d-89bc-566751e82c77\") " Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.328658 4948 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3c3306fd-2d96-405d-89bc-566751e82c77-host\") on node \"crc\" DevicePath \"\"" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.336199 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c3306fd-2d96-405d-89bc-566751e82c77-kube-api-access-ztzb9" (OuterVolumeSpecName: "kube-api-access-ztzb9") pod "3c3306fd-2d96-405d-89bc-566751e82c77" (UID: "3c3306fd-2d96-405d-89bc-566751e82c77"). InnerVolumeSpecName "kube-api-access-ztzb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.348872 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-2k89b_d6a36d62-a638-45c5-956a-12cb6f1ced24/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.365561 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-6mp4q_d507465c-a0e3-494e-9e20-ef8c3517e059/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.430208 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztzb9\" (UniqueName: \"kubernetes.io/projected/3c3306fd-2d96-405d-89bc-566751e82c77-kube-api-access-ztzb9\") on node \"crc\" DevicePath \"\"" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.432889 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-x9hmd_b78116d1-a584-49fa-ab14-86f78ce62420/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.442726 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-m8f25_d8461566-61e6-495d-b1ad-c0178c2eb849/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.469026 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-b7j48_6f758308-6a33-4dc5-996e-beae970d4083/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.581422 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c3306fd-2d96-405d-89bc-566751e82c77" path="/var/lib/kubelet/pods/3c3306fd-2d96-405d-89bc-566751e82c77/volumes" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.745846 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-xgc4z_09ceeac6-c058-41a8-a0d6-07b4bde73893/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.755851 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-6xdw4_233a0ffe-a99e-4268-93ed-a2a20cb2c7ab/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.828344 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-hkwvp_ed91900c-0efb-4184-8d92-d11fb7ae82b7/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.841114 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-snszj_38d63cbf-6bc2-4c48-9905-88c65334d42a/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.876140 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-7qmgq_61ba0da3-99a5-4b43-a2fb-190260ab8f3a/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.916134 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-5mlm4_61da457f-7595-4df3-8705-e34138ec590d/manager/0.log" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.927879 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-774kh"] Jan 20 20:51:26 crc kubenswrapper[4948]: E0120 20:51:26.928285 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c3306fd-2d96-405d-89bc-566751e82c77" containerName="container-00" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.928303 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c3306fd-2d96-405d-89bc-566751e82c77" containerName="container-00" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.928540 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c3306fd-2d96-405d-89bc-566751e82c77" containerName="container-00" Jan 20 20:51:26 crc kubenswrapper[4948]: I0120 20:51:26.929185 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.001413 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-phpvf_094e4268-74c4-40e5-8f39-b6090b284c27/manager/0.log" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.032377 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-k9n27_d4f3075e-95f9-432a-bfcd-621b6cbe2615/manager/0.log" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.041831 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-host\") pod \"crc-debug-774kh\" (UID: \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\") " pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.041887 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt5z7\" (UniqueName: \"kubernetes.io/projected/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-kube-api-access-dt5z7\") pod \"crc-debug-774kh\" (UID: \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\") " pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.048131 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl_40c9112e-c5f0-4cf7-8039-f50ff4640ba9/manager/0.log" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.107352 4948 scope.go:117] "RemoveContainer" 
containerID="7efba51c96b5facd7685081e449cc1e750ce4d5142a3d809021bdd3cb8da454d" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.107494 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-gvkkr" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.143334 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-host\") pod \"crc-debug-774kh\" (UID: \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\") " pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.143382 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt5z7\" (UniqueName: \"kubernetes.io/projected/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-kube-api-access-dt5z7\") pod \"crc-debug-774kh\" (UID: \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\") " pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.143772 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-host\") pod \"crc-debug-774kh\" (UID: \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\") " pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.150742 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5fcf846598-7x9nh_6d523c92-ebbc-4860-9bcc-45ef88372f2b/operator/0.log" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.178246 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt5z7\" (UniqueName: \"kubernetes.io/projected/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-kube-api-access-dt5z7\") pod \"crc-debug-774kh\" (UID: \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\") " pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:27 crc kubenswrapper[4948]: I0120 20:51:27.252827 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:27 crc kubenswrapper[4948]: W0120 20:51:27.283757 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00786d3e_b4b5_4534_8d6d_aa58c0ac41f0.slice/crio-e840dda7dca64403a44f07fac8b0397624e35e6c814f154f2acf59cf6391e095 WatchSource:0}: Error finding container e840dda7dca64403a44f07fac8b0397624e35e6c814f154f2acf59cf6391e095: Status 404 returned error can't find the container with id e840dda7dca64403a44f07fac8b0397624e35e6c814f154f2acf59cf6391e095 Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.171209 4948 generic.go:334] "Generic (PLEG): container finished" podID="00786d3e-b4b5-4534-8d6d-aa58c0ac41f0" containerID="fbfe4bdcd265c600b09198dcae7b4d3baf3f2d56deef1c735586fe7cacd702cd" exitCode=0 Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.171849 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/crc-debug-774kh" event={"ID":"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0","Type":"ContainerDied","Data":"fbfe4bdcd265c600b09198dcae7b4d3baf3f2d56deef1c735586fe7cacd702cd"} Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.171883 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/crc-debug-774kh" event={"ID":"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0","Type":"ContainerStarted","Data":"e840dda7dca64403a44f07fac8b0397624e35e6c814f154f2acf59cf6391e095"} Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.227463 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-774kh"] Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.242777 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pz7tb/crc-debug-774kh"] Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.258213 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c9b95f56c-kd6qw_0a88f765-46a8-4252-832c-ccf595a0f1d2/manager/0.log" Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.268485 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-fckw5_e98fafb2-a9ef-4252-a236-be3c009d42b2/registry-server/0.log" Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.322378 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-zpq74_ebd95a40-2e8d-481a-a842-b8fe125ebdb2/manager/0.log" Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.357068 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-wnzkb_febd743e-d499-4cc9-9e66-29ac1b4ca89c/manager/0.log" Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.382009 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-9m5nk_f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0/operator/0.log" Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.405351 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-56544cf655-ngkkb_80950323-03e4-4aa3-ba31-06043e2a51b9/manager/0.log" Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.468237 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-rsb9m_910fc292-11a6-47de-80e6-59cc027e972c/manager/0.log" Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.477537 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-2bt9t_5a25aeaf-8323-46a9-8c2a-e000321478ee/manager/0.log" Jan 20 20:51:28 crc kubenswrapper[4948]: I0120 20:51:28.493007 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-52fnn_76b9cf9a-a325-4528-8f35-3d0b94060ef1/manager/0.log" Jan 20 20:51:29 crc kubenswrapper[4948]: I0120 20:51:29.278316 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:29 crc kubenswrapper[4948]: I0120 20:51:29.388578 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dt5z7\" (UniqueName: \"kubernetes.io/projected/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-kube-api-access-dt5z7\") pod \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\" (UID: \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\") " Jan 20 20:51:29 crc kubenswrapper[4948]: I0120 20:51:29.389248 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-host\") pod \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\" (UID: \"00786d3e-b4b5-4534-8d6d-aa58c0ac41f0\") " Jan 20 20:51:29 crc kubenswrapper[4948]: I0120 20:51:29.389383 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-host" (OuterVolumeSpecName: "host") pod "00786d3e-b4b5-4534-8d6d-aa58c0ac41f0" (UID: "00786d3e-b4b5-4534-8d6d-aa58c0ac41f0"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 20 20:51:29 crc kubenswrapper[4948]: I0120 20:51:29.390140 4948 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-host\") on node \"crc\" DevicePath \"\"" Jan 20 20:51:29 crc kubenswrapper[4948]: I0120 20:51:29.402894 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-kube-api-access-dt5z7" (OuterVolumeSpecName: "kube-api-access-dt5z7") pod "00786d3e-b4b5-4534-8d6d-aa58c0ac41f0" (UID: "00786d3e-b4b5-4534-8d6d-aa58c0ac41f0"). InnerVolumeSpecName "kube-api-access-dt5z7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:51:29 crc kubenswrapper[4948]: I0120 20:51:29.492851 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dt5z7\" (UniqueName: \"kubernetes.io/projected/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0-kube-api-access-dt5z7\") on node \"crc\" DevicePath \"\"" Jan 20 20:51:30 crc kubenswrapper[4948]: I0120 20:51:30.195162 4948 scope.go:117] "RemoveContainer" containerID="fbfe4bdcd265c600b09198dcae7b4d3baf3f2d56deef1c735586fe7cacd702cd" Jan 20 20:51:30 crc kubenswrapper[4948]: I0120 20:51:30.195230 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/crc-debug-774kh" Jan 20 20:51:30 crc kubenswrapper[4948]: I0120 20:51:30.581776 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00786d3e-b4b5-4534-8d6d-aa58c0ac41f0" path="/var/lib/kubelet/pods/00786d3e-b4b5-4534-8d6d-aa58c0ac41f0/volumes" Jan 20 20:51:32 crc kubenswrapper[4948]: I0120 20:51:32.581082 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:51:32 crc kubenswrapper[4948]: E0120 20:51:32.582036 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:51:34 crc kubenswrapper[4948]: I0120 20:51:34.189515 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4pnmq_203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3/control-plane-machine-set-operator/0.log" Jan 20 20:51:34 crc kubenswrapper[4948]: I0120 20:51:34.207687 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-hxwlm_666e60ed-f213-4af4-a4a9-969864d1fd0e/kube-rbac-proxy/0.log" Jan 20 20:51:34 crc kubenswrapper[4948]: I0120 20:51:34.217526 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-hxwlm_666e60ed-f213-4af4-a4a9-969864d1fd0e/machine-api-operator/0.log" Jan 20 20:51:39 crc kubenswrapper[4948]: I0120 20:51:39.743099 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-dt9ht_0a4be8e0-f8af-4f0d-8230-37fd71e2cc81/cert-manager-controller/0.log" Jan 20 20:51:39 crc kubenswrapper[4948]: I0120 20:51:39.760833 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-82hbd_1973fd2f-85c7-4fbb-92b0-0973744d9d00/cert-manager-cainjector/0.log" Jan 20 20:51:39 crc kubenswrapper[4948]: I0120 20:51:39.771211 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-fckz7_5474f4e5-fa0d-4931-b732-4a1d0e06c858/cert-manager-webhook/0.log" Jan 20 20:51:46 crc kubenswrapper[4948]: I0120 20:51:46.071509 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-czsd9_a0bd44ac-39a0-4aed-8a23-d12330d46924/nmstate-console-plugin/0.log" Jan 20 20:51:46 crc kubenswrapper[4948]: I0120 20:51:46.091018 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-nqpgc_34b9a637-f29d-49ad-961c-d923e71907e1/nmstate-handler/0.log" Jan 20 20:51:46 crc kubenswrapper[4948]: I0120 20:51:46.105490 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jq57s_d7a43a4d-6505-4105-bfb8-c1239d0436e8/nmstate-metrics/0.log" Jan 20 20:51:46 crc kubenswrapper[4948]: I0120 20:51:46.114319 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jq57s_d7a43a4d-6505-4105-bfb8-c1239d0436e8/kube-rbac-proxy/0.log" Jan 20 20:51:46 crc kubenswrapper[4948]: I0120 20:51:46.143053 4948 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-9ldq2_d72955e0-ce7e-4d8f-be8a-b22eee46ec69/nmstate-operator/0.log" Jan 20 20:51:46 crc kubenswrapper[4948]: I0120 20:51:46.161165 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-6lt8c_b4431242-1662-43bd-bbfc-192d87f5393b/nmstate-webhook/0.log" Jan 20 20:51:47 crc kubenswrapper[4948]: I0120 20:51:47.569751 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:51:47 crc kubenswrapper[4948]: E0120 20:51:47.570227 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:51:58 crc kubenswrapper[4948]: I0120 20:51:58.096110 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/controller/0.log" Jan 20 20:51:58 crc kubenswrapper[4948]: I0120 20:51:58.103018 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/kube-rbac-proxy/0.log" Jan 20 20:51:58 crc kubenswrapper[4948]: I0120 20:51:58.119820 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/controller/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.148957 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.161076 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/reloader/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.166115 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr-metrics/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.180010 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.194304 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy-frr/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.205644 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-frr-files/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.220134 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-reloader/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.226047 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-metrics/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.247201 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-mxgmc_06d4b8b1-3c5f-4736-9492-bc33db43f510/frr-k8s-webhook-server/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.277551 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7998c69bcc-rkwld_a422b9d2-2fe8-485a-a7c7-fb0fa96706c9/manager/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.296458 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-989f8776d-mst22_3eb6ce14-f5fb-4e93-8f16-d4b0eec67237/webhook-server/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.570077 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:51:59 crc kubenswrapper[4948]: E0120 20:51:59.570349 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.586593 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/speaker/0.log" Jan 20 20:51:59 crc kubenswrapper[4948]: I0120 20:51:59.603862 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/kube-rbac-proxy/0.log" Jan 20 20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.141809 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8_d79fcc60-85eb-450d-8d37-5b00b0af4ea0/extract/0.log" Jan 20 20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.155626 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8_d79fcc60-85eb-450d-8d37-5b00b0af4ea0/util/0.log" Jan 20 20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.166422 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcqg7w8_d79fcc60-85eb-450d-8d37-5b00b0af4ea0/pull/0.log" Jan 20 20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.182714 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7_d0fed87f-472d-480c-8006-2c2dc60df61e/extract/0.log" Jan 20 20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.189189 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7_d0fed87f-472d-480c-8006-2c2dc60df61e/util/0.log" Jan 20 20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.203569 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71367ct7_d0fed87f-472d-480c-8006-2c2dc60df61e/pull/0.log" Jan 20 20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.329972 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-db5tw_0120cd08-de07-487b-af62-88990bca428d/registry-server/0.log" Jan 20 
20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.336570 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-db5tw_0120cd08-de07-487b-af62-88990bca428d/extract-utilities/0.log" Jan 20 20:52:05 crc kubenswrapper[4948]: I0120 20:52:05.353114 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-db5tw_0120cd08-de07-487b-af62-88990bca428d/extract-content/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.083964 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2jd7_52223d24-be7c-4761-8f46-efcc30f37f8b/registry-server/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.089079 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2jd7_52223d24-be7c-4761-8f46-efcc30f37f8b/extract-utilities/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.100937 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-h2jd7_52223d24-be7c-4761-8f46-efcc30f37f8b/extract-content/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.115625 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-z8fwl_7cf25c7d-e351-4a2e-8992-47542811fb1f/marketplace-operator/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.116042 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-z8fwl_7cf25c7d-e351-4a2e-8992-47542811fb1f/marketplace-operator/1.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.232447 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsxfw_f8d1e5d7-2511-47ad-b240-677792863a32/registry-server/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.237293 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsxfw_f8d1e5d7-2511-47ad-b240-677792863a32/extract-utilities/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.250260 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-hsxfw_f8d1e5d7-2511-47ad-b240-677792863a32/extract-content/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.731258 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kpqs5_29572b48-7ca5-4e09-83d8-dcf2cc40682b/registry-server/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.743210 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kpqs5_29572b48-7ca5-4e09-83d8-dcf2cc40682b/extract-utilities/0.log" Jan 20 20:52:06 crc kubenswrapper[4948]: I0120 20:52:06.762446 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-kpqs5_29572b48-7ca5-4e09-83d8-dcf2cc40682b/extract-content/0.log" Jan 20 20:52:10 crc kubenswrapper[4948]: I0120 20:52:10.570755 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:52:10 crc kubenswrapper[4948]: E0120 20:52:10.571579 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:52:24 crc kubenswrapper[4948]: I0120 20:52:24.570167 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:52:24 crc kubenswrapper[4948]: E0120 20:52:24.570957 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:52:38 crc kubenswrapper[4948]: I0120 20:52:38.570399 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:52:38 crc kubenswrapper[4948]: E0120 20:52:38.571257 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:52:50 crc kubenswrapper[4948]: I0120 20:52:50.573762 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:52:50 crc kubenswrapper[4948]: E0120 20:52:50.574422 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:53:05 crc kubenswrapper[4948]: I0120 20:53:05.571061 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:53:05 crc kubenswrapper[4948]: E0120 20:53:05.571787 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:53:19 crc kubenswrapper[4948]: I0120 20:53:19.570520 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:53:19 crc kubenswrapper[4948]: E0120 20:53:19.571295 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" 
podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.416833 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dwq66"] Jan 20 20:53:27 crc kubenswrapper[4948]: E0120 20:53:27.417738 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00786d3e-b4b5-4534-8d6d-aa58c0ac41f0" containerName="container-00" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.417751 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="00786d3e-b4b5-4534-8d6d-aa58c0ac41f0" containerName="container-00" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.418015 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="00786d3e-b4b5-4534-8d6d-aa58c0ac41f0" containerName="container-00" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.419412 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.438893 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dwq66"] Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.492845 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4mk5\" (UniqueName: \"kubernetes.io/projected/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-kube-api-access-h4mk5\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.492952 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-catalog-content\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.493127 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-utilities\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.595186 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-utilities\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.595258 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4mk5\" (UniqueName: \"kubernetes.io/projected/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-kube-api-access-h4mk5\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.595329 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-catalog-content\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " 
pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.597223 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-utilities\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.597803 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-catalog-content\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.640436 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4mk5\" (UniqueName: \"kubernetes.io/projected/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-kube-api-access-h4mk5\") pod \"redhat-operators-dwq66\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:27 crc kubenswrapper[4948]: I0120 20:53:27.738278 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:28 crc kubenswrapper[4948]: I0120 20:53:28.329002 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dwq66"] Jan 20 20:53:28 crc kubenswrapper[4948]: I0120 20:53:28.899245 4948 generic.go:334] "Generic (PLEG): container finished" podID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerID="8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64" exitCode=0 Jan 20 20:53:28 crc kubenswrapper[4948]: I0120 20:53:28.899582 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwq66" event={"ID":"0b100d69-21f0-4a17-aeaa-c789d8e54e2f","Type":"ContainerDied","Data":"8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64"} Jan 20 20:53:28 crc kubenswrapper[4948]: I0120 20:53:28.899641 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwq66" event={"ID":"0b100d69-21f0-4a17-aeaa-c789d8e54e2f","Type":"ContainerStarted","Data":"f0f0aa0c9398a64acdf85b9956dfb82402944afd6a40944a53d8dd1347b6cd77"} Jan 20 20:53:28 crc kubenswrapper[4948]: I0120 20:53:28.902757 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:53:30 crc kubenswrapper[4948]: I0120 20:53:30.917015 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwq66" event={"ID":"0b100d69-21f0-4a17-aeaa-c789d8e54e2f","Type":"ContainerStarted","Data":"4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900"} Jan 20 20:53:31 crc kubenswrapper[4948]: I0120 20:53:31.570236 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:53:31 crc kubenswrapper[4948]: E0120 20:53:31.570699 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:53:33 crc kubenswrapper[4948]: I0120 20:53:33.942696 4948 generic.go:334] "Generic (PLEG): container finished" podID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerID="4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900" exitCode=0 Jan 20 20:53:33 crc kubenswrapper[4948]: I0120 20:53:33.944127 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwq66" event={"ID":"0b100d69-21f0-4a17-aeaa-c789d8e54e2f","Type":"ContainerDied","Data":"4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900"} Jan 20 20:53:34 crc kubenswrapper[4948]: I0120 20:53:34.955639 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwq66" event={"ID":"0b100d69-21f0-4a17-aeaa-c789d8e54e2f","Type":"ContainerStarted","Data":"ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa"} Jan 20 20:53:34 crc kubenswrapper[4948]: I0120 20:53:34.988783 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dwq66" podStartSLOduration=2.428124248 podStartE2EDuration="7.988756703s" podCreationTimestamp="2026-01-20 20:53:27 +0000 UTC" firstStartedPulling="2026-01-20 20:53:28.902438311 +0000 UTC m=+3836.853163280" lastFinishedPulling="2026-01-20 20:53:34.463070766 +0000 UTC m=+3842.413795735" observedRunningTime="2026-01-20 20:53:34.978165952 +0000 UTC m=+3842.928890941" watchObservedRunningTime="2026-01-20 20:53:34.988756703 +0000 UTC m=+3842.939481672" Jan 20 20:53:37 crc kubenswrapper[4948]: I0120 20:53:37.562105 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/controller/0.log" Jan 20 20:53:37 crc kubenswrapper[4948]: I0120 20:53:37.622492 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-q4qhx_04d1e8ae-e88d-4357-87c8-c15899e9ce23/kube-rbac-proxy/0.log" Jan 20 20:53:37 crc kubenswrapper[4948]: I0120 20:53:37.645898 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/controller/0.log" Jan 20 20:53:37 crc kubenswrapper[4948]: I0120 20:53:37.739070 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:37 crc kubenswrapper[4948]: I0120 20:53:37.739106 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.553113 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-dt9ht_0a4be8e0-f8af-4f0d-8230-37fd71e2cc81/cert-manager-controller/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.570084 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-82hbd_1973fd2f-85c7-4fbb-92b0-0973744d9d00/cert-manager-cainjector/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.587494 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-fckz7_5474f4e5-fa0d-4931-b732-4a1d0e06c858/cert-manager-webhook/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.776613 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.786013 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/reloader/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.793246 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/frr-metrics/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.799243 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.806646 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/kube-rbac-proxy-frr/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.813160 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-frr-files/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.817628 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-reloader/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.823689 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-khbv6_2f322a0b-2e68-429d-b734-c7e20e346a47/cp-metrics/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.825572 4948 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dwq66" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="registry-server" probeResult="failure" output=< Jan 20 20:53:38 crc kubenswrapper[4948]: timeout: failed to connect service ":50051" within 1s Jan 20 20:53:38 crc kubenswrapper[4948]: > Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.840456 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-mxgmc_06d4b8b1-3c5f-4736-9492-bc33db43f510/frr-k8s-webhook-server/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.868288 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7998c69bcc-rkwld_a422b9d2-2fe8-485a-a7c7-fb0fa96706c9/manager/0.log" Jan 20 20:53:38 crc kubenswrapper[4948]: I0120 20:53:38.879820 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-989f8776d-mst22_3eb6ce14-f5fb-4e93-8f16-d4b0eec67237/webhook-server/0.log" Jan 20 20:53:39 crc kubenswrapper[4948]: I0120 20:53:39.175612 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/speaker/0.log" Jan 20 20:53:39 crc kubenswrapper[4948]: I0120 20:53:39.184280 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fl6v6_9a99fce2-43d3-43f4-bada-ca2b9f94673c/kube-rbac-proxy/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.337650 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/extract/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.355961 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/util/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.362648 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/pull/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.444949 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-6vfzk_ef41048d-32d0-4b45-98ef-181e13e62c26/manager/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.480146 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-2k89b_d6a36d62-a638-45c5-956a-12cb6f1ced24/manager/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.535217 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-6mp4q_d507465c-a0e3-494e-9e20-ef8c3517e059/manager/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.594960 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-x9hmd_b78116d1-a584-49fa-ab14-86f78ce62420/manager/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.611314 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-m8f25_d8461566-61e6-495d-b1ad-c0178c2eb849/manager/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.763323 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-b7j48_6f758308-6a33-4dc5-996e-beae970d4083/manager/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.981145 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-xgc4z_09ceeac6-c058-41a8-a0d6-07b4bde73893/manager/0.log" Jan 20 20:53:40 crc kubenswrapper[4948]: I0120 20:53:40.992170 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-6xdw4_233a0ffe-a99e-4268-93ed-a2a20cb2c7ab/manager/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.061025 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-hkwvp_ed91900c-0efb-4184-8d92-d11fb7ae82b7/manager/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.074219 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-snszj_38d63cbf-6bc2-4c48-9905-88c65334d42a/manager/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.116555 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-7qmgq_61ba0da3-99a5-4b43-a2fb-190260ab8f3a/manager/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.165394 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-5mlm4_61da457f-7595-4df3-8705-e34138ec590d/manager/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.238489 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-phpvf_094e4268-74c4-40e5-8f39-b6090b284c27/manager/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.259613 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-k9n27_d4f3075e-95f9-432a-bfcd-621b6cbe2615/manager/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.288788 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl_40c9112e-c5f0-4cf7-8039-f50ff4640ba9/manager/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.390268 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5fcf846598-7x9nh_6d523c92-ebbc-4860-9bcc-45ef88372f2b/operator/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.720805 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-dt9ht_0a4be8e0-f8af-4f0d-8230-37fd71e2cc81/cert-manager-controller/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.743230 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-82hbd_1973fd2f-85c7-4fbb-92b0-0973744d9d00/cert-manager-cainjector/0.log" Jan 20 20:53:41 crc kubenswrapper[4948]: I0120 20:53:41.763347 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-fckz7_5474f4e5-fa0d-4931-b732-4a1d0e06c858/cert-manager-webhook/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.477990 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c9b95f56c-kd6qw_0a88f765-46a8-4252-832c-ccf595a0f1d2/manager/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.497458 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-fckw5_e98fafb2-a9ef-4252-a236-be3c009d42b2/registry-server/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.553512 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-zpq74_ebd95a40-2e8d-481a-a842-b8fe125ebdb2/manager/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.574007 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-wnzkb_febd743e-d499-4cc9-9e66-29ac1b4ca89c/manager/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.596942 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-9m5nk_f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0/operator/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.622541 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-56544cf655-ngkkb_80950323-03e4-4aa3-ba31-06043e2a51b9/manager/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.699088 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-rsb9m_910fc292-11a6-47de-80e6-59cc027e972c/manager/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.718763 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-2bt9t_5a25aeaf-8323-46a9-8c2a-e000321478ee/manager/0.log" Jan 20 20:53:42 crc kubenswrapper[4948]: I0120 20:53:42.747247 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-52fnn_76b9cf9a-a325-4528-8f35-3d0b94060ef1/manager/0.log" Jan 20 20:53:43 crc kubenswrapper[4948]: I0120 20:53:43.029154 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4pnmq_203ab3d2-7a0b-4558-a3f8-c95a33b1c7f3/control-plane-machine-set-operator/0.log" Jan 20 20:53:43 crc kubenswrapper[4948]: I0120 20:53:43.043662 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-hxwlm_666e60ed-f213-4af4-a4a9-969864d1fd0e/kube-rbac-proxy/0.log" Jan 20 20:53:43 crc kubenswrapper[4948]: I0120 20:53:43.058570 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-hxwlm_666e60ed-f213-4af4-a4a9-969864d1fd0e/machine-api-operator/0.log" Jan 20 20:53:43 crc kubenswrapper[4948]: I0120 20:53:43.570657 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:53:43 crc kubenswrapper[4948]: E0120 20:53:43.570902 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.525738 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/extract/0.log" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.548928 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/util/0.log" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.573118 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a2adea3bcd090b5e6debfd54bbf95b89c13aa2ccfe94a9b5d7b78ae8e8p2lm8_349488b0-c355-4358-8fb2-1979301298a1/pull/0.log" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.655215 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7ddb5c749-6vfzk_ef41048d-32d0-4b45-98ef-181e13e62c26/manager/0.log" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.702060 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-9b68f5989-2k89b_d6a36d62-a638-45c5-956a-12cb6f1ced24/manager/0.log" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.715745 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-9f958b845-6mp4q_d507465c-a0e3-494e-9e20-ef8c3517e059/manager/0.log" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.778036 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-c6994669c-x9hmd_b78116d1-a584-49fa-ab14-86f78ce62420/manager/0.log" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.797395 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-m8f25_d8461566-61e6-495d-b1ad-c0178c2eb849/manager/0.log" Jan 20 20:53:44 crc kubenswrapper[4948]: I0120 20:53:44.819292 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-b7j48_6f758308-6a33-4dc5-996e-beae970d4083/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.040027 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-77c48c7859-xgc4z_09ceeac6-c058-41a8-a0d6-07b4bde73893/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.066137 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-78757b4889-6xdw4_233a0ffe-a99e-4268-93ed-a2a20cb2c7ab/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.144062 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-767fdc4f47-hkwvp_ed91900c-0efb-4184-8d92-d11fb7ae82b7/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.157945 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-864f6b75bf-snszj_38d63cbf-6bc2-4c48-9905-88c65334d42a/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.160491 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-czsd9_a0bd44ac-39a0-4aed-8a23-d12330d46924/nmstate-console-plugin/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.181813 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-nqpgc_34b9a637-f29d-49ad-961c-d923e71907e1/nmstate-handler/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.192366 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-7qmgq_61ba0da3-99a5-4b43-a2fb-190260ab8f3a/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.194605 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jq57s_d7a43a4d-6505-4105-bfb8-c1239d0436e8/nmstate-metrics/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.213096 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-jq57s_d7a43a4d-6505-4105-bfb8-c1239d0436e8/kube-rbac-proxy/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.230659 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-9ldq2_d72955e0-ce7e-4d8f-be8a-b22eee46ec69/nmstate-operator/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.233919 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-cb4666565-5mlm4_61da457f-7595-4df3-8705-e34138ec590d/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.253971 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-6lt8c_b4431242-1662-43bd-bbfc-192d87f5393b/nmstate-webhook/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.319757 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-65849867d6-phpvf_094e4268-74c4-40e5-8f39-b6090b284c27/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.331739 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7fc9b76cf6-k9n27_d4f3075e-95f9-432a-bfcd-621b6cbe2615/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.348193 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-6b68b8b854xxqhl_40c9112e-c5f0-4cf7-8039-f50ff4640ba9/manager/0.log" Jan 20 20:53:45 crc kubenswrapper[4948]: I0120 20:53:45.468453 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-5fcf846598-7x9nh_6d523c92-ebbc-4860-9bcc-45ef88372f2b/operator/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.516536 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7c9b95f56c-kd6qw_0a88f765-46a8-4252-832c-ccf595a0f1d2/manager/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.532214 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-fckw5_e98fafb2-a9ef-4252-a236-be3c009d42b2/registry-server/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.583107 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-zpq74_ebd95a40-2e8d-481a-a842-b8fe125ebdb2/manager/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.602629 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-686df47fcb-wnzkb_febd743e-d499-4cc9-9e66-29ac1b4ca89c/manager/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.627352 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-9m5nk_f2fc1e50-d924-4e66-9ba5-b7fcb44b4ed0/operator/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.658136 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-56544cf655-ngkkb_80950323-03e4-4aa3-ba31-06043e2a51b9/manager/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.718831 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-5f8f495fcf-rsb9m_910fc292-11a6-47de-80e6-59cc027e972c/manager/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.730412 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-7cd8bc9dbb-2bt9t_5a25aeaf-8323-46a9-8c2a-e000321478ee/manager/0.log" Jan 20 20:53:46 crc kubenswrapper[4948]: I0120 20:53:46.740934 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-64cd966744-52fnn_76b9cf9a-a325-4528-8f35-3d0b94060ef1/manager/0.log" Jan 20 20:53:47 crc kubenswrapper[4948]: I0120 20:53:47.782034 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:47 crc kubenswrapper[4948]: I0120 20:53:47.832151 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.033031 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dwq66"] Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.886073 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/kube-multus-additional-cni-plugins/0.log" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.892339 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/egress-router-binary-copy/0.log" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.902014 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/cni-plugins/0.log" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.910995 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/bond-cni-plugin/0.log" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.920109 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/routeoverride-cni/0.log" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.930734 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/whereabouts-cni-bincopy/0.log" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.949525 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-additional-cni-plugins-ms8h8_c6c006e4-2994-4ab8-bdfc-90703054f20d/whereabouts-cni/0.log" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.987825 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-k4fgt_34a4c701-23f8-4d4e-97c0-7ceeaa229d0f/multus-admission-controller/0.log" Jan 20 20:53:48 crc kubenswrapper[4948]: I0120 20:53:48.992883 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-admission-controller-857f4d67dd-k4fgt_34a4c701-23f8-4d4e-97c0-7ceeaa229d0f/kube-rbac-proxy/0.log" Jan 20 20:53:49 crc kubenswrapper[4948]: I0120 20:53:49.035363 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/1.log" Jan 20 20:53:49 crc kubenswrapper[4948]: I0120 20:53:49.104822 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-qttfm_e21ac8a2-1e79-4191-b809-75085d432b31/kube-multus/2.log" Jan 20 20:53:49 crc kubenswrapper[4948]: I0120 20:53:49.118045 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dwq66" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="registry-server" containerID="cri-o://ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa" gracePeriod=2 Jan 20 20:53:49 crc kubenswrapper[4948]: I0120 20:53:49.138234 4948 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_network-metrics-daemon-h4c6s_dbfcfce6-0ab8-40ba-80b2-d391a7dd5418/network-metrics-daemon/0.log" Jan 20 20:53:49 crc kubenswrapper[4948]: I0120 20:53:49.144626 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_network-metrics-daemon-h4c6s_dbfcfce6-0ab8-40ba-80b2-d391a7dd5418/kube-rbac-proxy/0.log" Jan 20 20:53:49 crc kubenswrapper[4948]: I0120 20:53:49.975038 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.081410 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-utilities\") pod \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.081507 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-catalog-content\") pod \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.081610 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4mk5\" (UniqueName: \"kubernetes.io/projected/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-kube-api-access-h4mk5\") pod \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\" (UID: \"0b100d69-21f0-4a17-aeaa-c789d8e54e2f\") " Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.082675 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-utilities" (OuterVolumeSpecName: "utilities") pod "0b100d69-21f0-4a17-aeaa-c789d8e54e2f" (UID: "0b100d69-21f0-4a17-aeaa-c789d8e54e2f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.082817 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.094018 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-kube-api-access-h4mk5" (OuterVolumeSpecName: "kube-api-access-h4mk5") pod "0b100d69-21f0-4a17-aeaa-c789d8e54e2f" (UID: "0b100d69-21f0-4a17-aeaa-c789d8e54e2f"). InnerVolumeSpecName "kube-api-access-h4mk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.131906 4948 generic.go:334] "Generic (PLEG): container finished" podID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerID="ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa" exitCode=0 Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.131958 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwq66" event={"ID":"0b100d69-21f0-4a17-aeaa-c789d8e54e2f","Type":"ContainerDied","Data":"ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa"} Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.132010 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dwq66" event={"ID":"0b100d69-21f0-4a17-aeaa-c789d8e54e2f","Type":"ContainerDied","Data":"f0f0aa0c9398a64acdf85b9956dfb82402944afd6a40944a53d8dd1347b6cd77"} Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.132034 4948 scope.go:117] "RemoveContainer" containerID="ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.132231 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dwq66" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.151683 4948 scope.go:117] "RemoveContainer" containerID="4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.181943 4948 scope.go:117] "RemoveContainer" containerID="8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.184943 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4mk5\" (UniqueName: \"kubernetes.io/projected/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-kube-api-access-h4mk5\") on node \"crc\" DevicePath \"\"" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.215002 4948 scope.go:117] "RemoveContainer" containerID="ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa" Jan 20 20:53:50 crc kubenswrapper[4948]: E0120 20:53:50.215360 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa\": container with ID starting with ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa not found: ID does not exist" containerID="ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.215401 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa"} err="failed to get container status \"ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa\": rpc error: code = NotFound desc = could not find container \"ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa\": container with ID starting with ac83283ef8bca9e7da2169f907ce2ec58033a4f044e6f22fa685d2981f3aa7fa not found: ID does not exist" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.215429 4948 scope.go:117] "RemoveContainer" containerID="4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900" Jan 20 20:53:50 crc kubenswrapper[4948]: E0120 20:53:50.215681 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not 
find container \"4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900\": container with ID starting with 4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900 not found: ID does not exist" containerID="4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.215800 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900"} err="failed to get container status \"4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900\": rpc error: code = NotFound desc = could not find container \"4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900\": container with ID starting with 4c1e68b5654caace901b541b1d825fcf57e1364230876d2a96d0a80d0be3c900 not found: ID does not exist" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.215884 4948 scope.go:117] "RemoveContainer" containerID="8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64" Jan 20 20:53:50 crc kubenswrapper[4948]: E0120 20:53:50.216151 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64\": container with ID starting with 8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64 not found: ID does not exist" containerID="8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.216241 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64"} err="failed to get container status \"8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64\": rpc error: code = NotFound desc = could not find container \"8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64\": container with ID starting with 8dd1a4a70d5780888337b89e1f9d2a259bf1a0e38c512ad1a93ec89543ba0d64 not found: ID does not exist" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.225208 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b100d69-21f0-4a17-aeaa-c789d8e54e2f" (UID: "0b100d69-21f0-4a17-aeaa-c789d8e54e2f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.286921 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b100d69-21f0-4a17-aeaa-c789d8e54e2f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.470254 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dwq66"] Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.478779 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dwq66"] Jan 20 20:53:50 crc kubenswrapper[4948]: I0120 20:53:50.579649 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" path="/var/lib/kubelet/pods/0b100d69-21f0-4a17-aeaa-c789d8e54e2f/volumes" Jan 20 20:53:58 crc kubenswrapper[4948]: I0120 20:53:58.570584 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:53:58 crc kubenswrapper[4948]: E0120 20:53:58.571315 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:54:11 crc kubenswrapper[4948]: I0120 20:54:11.570502 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:54:11 crc kubenswrapper[4948]: E0120 20:54:11.571399 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:54:23 crc kubenswrapper[4948]: I0120 20:54:23.569919 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:54:23 crc kubenswrapper[4948]: E0120 20:54:23.571070 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:54:34 crc kubenswrapper[4948]: I0120 20:54:34.571033 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:54:34 crc kubenswrapper[4948]: E0120 20:54:34.571981 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:54:49 crc kubenswrapper[4948]: I0120 20:54:49.570509 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:54:49 crc kubenswrapper[4948]: E0120 20:54:49.571108 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 20:55:02 crc kubenswrapper[4948]: I0120 20:55:02.582260 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:55:02 crc kubenswrapper[4948]: I0120 20:55:02.993794 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"1caecefd109167b1b68675aec6dac8de142f275ff45d4bedbf7d74196eb27169"} Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.513574 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z22wp"] Jan 20 20:56:37 crc kubenswrapper[4948]: E0120 20:56:37.514919 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="extract-utilities" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.514942 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="extract-utilities" Jan 20 20:56:37 crc kubenswrapper[4948]: E0120 20:56:37.514996 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="extract-content" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.515009 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="extract-content" Jan 20 20:56:37 crc kubenswrapper[4948]: E0120 20:56:37.515051 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="registry-server" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.515065 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="registry-server" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.515491 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b100d69-21f0-4a17-aeaa-c789d8e54e2f" containerName="registry-server" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.518911 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.523207 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z22wp"] Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.633839 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tv9ss\" (UniqueName: \"kubernetes.io/projected/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-kube-api-access-tv9ss\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.633962 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-utilities\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.634465 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-catalog-content\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.736305 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-catalog-content\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.736436 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tv9ss\" (UniqueName: \"kubernetes.io/projected/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-kube-api-access-tv9ss\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.736533 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-utilities\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.737163 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-catalog-content\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.737321 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-utilities\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.763221 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tv9ss\" (UniqueName: \"kubernetes.io/projected/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-kube-api-access-tv9ss\") pod \"certified-operators-z22wp\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:37 crc kubenswrapper[4948]: I0120 20:56:37.849813 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:38 crc kubenswrapper[4948]: I0120 20:56:38.362826 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z22wp"] Jan 20 20:56:38 crc kubenswrapper[4948]: I0120 20:56:38.953380 4948 generic.go:334] "Generic (PLEG): container finished" podID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerID="d98d71d1f0e9e4f2395591af47e95ec54f33872460c6190b50ff12f560df5d33" exitCode=0 Jan 20 20:56:38 crc kubenswrapper[4948]: I0120 20:56:38.953765 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z22wp" event={"ID":"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4","Type":"ContainerDied","Data":"d98d71d1f0e9e4f2395591af47e95ec54f33872460c6190b50ff12f560df5d33"} Jan 20 20:56:38 crc kubenswrapper[4948]: I0120 20:56:38.953827 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z22wp" event={"ID":"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4","Type":"ContainerStarted","Data":"2ce6ad6391e4dc66d518b5be9ad832cd6d17cc77f4ba04f2226547e88d283664"} Jan 20 20:56:40 crc kubenswrapper[4948]: I0120 20:56:40.979489 4948 generic.go:334] "Generic (PLEG): container finished" podID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerID="95d1fd467a8fd013eef2c5dc0273573f8112730bd834ea8feac156436c825140" exitCode=0 Jan 20 20:56:40 crc kubenswrapper[4948]: I0120 20:56:40.979589 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z22wp" event={"ID":"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4","Type":"ContainerDied","Data":"95d1fd467a8fd013eef2c5dc0273573f8112730bd834ea8feac156436c825140"} Jan 20 20:56:41 crc kubenswrapper[4948]: I0120 20:56:41.993521 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z22wp" event={"ID":"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4","Type":"ContainerStarted","Data":"7cae468db43544e71f208cac7eb6420c101701efd6667a5150fd5913322dc2e7"} Jan 20 20:56:47 crc kubenswrapper[4948]: I0120 20:56:47.850602 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:47 crc kubenswrapper[4948]: I0120 20:56:47.851194 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:47 crc kubenswrapper[4948]: I0120 20:56:47.896361 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:47 crc kubenswrapper[4948]: I0120 20:56:47.931986 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z22wp" podStartSLOduration=8.361013248 podStartE2EDuration="10.931968531s" podCreationTimestamp="2026-01-20 20:56:37 +0000 UTC" firstStartedPulling="2026-01-20 20:56:38.955205935 +0000 UTC m=+4026.905930904" lastFinishedPulling="2026-01-20 20:56:41.526161228 +0000 UTC m=+4029.476886187" observedRunningTime="2026-01-20 
20:56:42.01859559 +0000 UTC m=+4029.969320569" watchObservedRunningTime="2026-01-20 20:56:47.931968531 +0000 UTC m=+4035.882693500" Jan 20 20:56:48 crc kubenswrapper[4948]: I0120 20:56:48.101048 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:48 crc kubenswrapper[4948]: I0120 20:56:48.195058 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z22wp"] Jan 20 20:56:50 crc kubenswrapper[4948]: I0120 20:56:50.533239 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z22wp" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerName="registry-server" containerID="cri-o://7cae468db43544e71f208cac7eb6420c101701efd6667a5150fd5913322dc2e7" gracePeriod=2 Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.546414 4948 generic.go:334] "Generic (PLEG): container finished" podID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerID="7cae468db43544e71f208cac7eb6420c101701efd6667a5150fd5913322dc2e7" exitCode=0 Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.546814 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z22wp" event={"ID":"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4","Type":"ContainerDied","Data":"7cae468db43544e71f208cac7eb6420c101701efd6667a5150fd5913322dc2e7"} Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.546857 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z22wp" event={"ID":"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4","Type":"ContainerDied","Data":"2ce6ad6391e4dc66d518b5be9ad832cd6d17cc77f4ba04f2226547e88d283664"} Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.546877 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ce6ad6391e4dc66d518b5be9ad832cd6d17cc77f4ba04f2226547e88d283664" Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.583416 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.644897 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-catalog-content\") pod \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.645457 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-utilities\") pod \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.645530 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tv9ss\" (UniqueName: \"kubernetes.io/projected/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-kube-api-access-tv9ss\") pod \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\" (UID: \"ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4\") " Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.646349 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-utilities" (OuterVolumeSpecName: "utilities") pod "ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" (UID: "ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.649156 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-utilities\") on node \"crc\" DevicePath \"\"" Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.660163 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-kube-api-access-tv9ss" (OuterVolumeSpecName: "kube-api-access-tv9ss") pod "ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" (UID: "ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4"). InnerVolumeSpecName "kube-api-access-tv9ss". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.700146 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" (UID: "ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.750810 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tv9ss\" (UniqueName: \"kubernetes.io/projected/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-kube-api-access-tv9ss\") on node \"crc\" DevicePath \"\"" Jan 20 20:56:51 crc kubenswrapper[4948]: I0120 20:56:51.750840 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 20 20:56:52 crc kubenswrapper[4948]: I0120 20:56:52.555365 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z22wp" Jan 20 20:56:52 crc kubenswrapper[4948]: I0120 20:56:52.639216 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z22wp"] Jan 20 20:56:52 crc kubenswrapper[4948]: I0120 20:56:52.651494 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z22wp"] Jan 20 20:56:54 crc kubenswrapper[4948]: I0120 20:56:54.580280 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" path="/var/lib/kubelet/pods/ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4/volumes" Jan 20 20:57:20 crc kubenswrapper[4948]: I0120 20:57:20.249683 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:57:20 crc kubenswrapper[4948]: I0120 20:57:20.250503 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:57:46 crc kubenswrapper[4948]: I0120 20:57:46.498740 4948 scope.go:117] "RemoveContainer" containerID="2bf3ac32145bf900f863a520a4031022443810123d405d2b29917d06e77ab513" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.282715 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-psjw9"] Jan 20 20:57:48 crc kubenswrapper[4948]: E0120 20:57:48.283375 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerName="extract-content" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.283388 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerName="extract-content" Jan 20 20:57:48 crc kubenswrapper[4948]: E0120 20:57:48.283408 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerName="extract-utilities" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.283415 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerName="extract-utilities" Jan 20 20:57:48 crc kubenswrapper[4948]: E0120 20:57:48.283442 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerName="registry-server" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.283448 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerName="registry-server" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.283635 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff4d05d5-97f0-486b-80ce-1c2bc61ab7b4" containerName="registry-server" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.285061 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.297054 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-psjw9"] Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.387649 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mklcj\" (UniqueName: \"kubernetes.io/projected/906d4c8a-4ef3-46ff-9897-97c14fb672bc-kube-api-access-mklcj\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.387810 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-utilities\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.387878 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-catalog-content\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.489149 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-utilities\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.489217 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-catalog-content\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.489334 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mklcj\" (UniqueName: \"kubernetes.io/projected/906d4c8a-4ef3-46ff-9897-97c14fb672bc-kube-api-access-mklcj\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.489601 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-utilities\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.489644 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-catalog-content\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.512046 4948 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mklcj\" (UniqueName: \"kubernetes.io/projected/906d4c8a-4ef3-46ff-9897-97c14fb672bc-kube-api-access-mklcj\") pod \"community-operators-psjw9\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:48 crc kubenswrapper[4948]: I0120 20:57:48.615573 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:49 crc kubenswrapper[4948]: I0120 20:57:49.130999 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-psjw9"] Jan 20 20:57:50 crc kubenswrapper[4948]: I0120 20:57:50.130643 4948 generic.go:334] "Generic (PLEG): container finished" podID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerID="b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c" exitCode=0 Jan 20 20:57:50 crc kubenswrapper[4948]: I0120 20:57:50.130952 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psjw9" event={"ID":"906d4c8a-4ef3-46ff-9897-97c14fb672bc","Type":"ContainerDied","Data":"b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c"} Jan 20 20:57:50 crc kubenswrapper[4948]: I0120 20:57:50.130987 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psjw9" event={"ID":"906d4c8a-4ef3-46ff-9897-97c14fb672bc","Type":"ContainerStarted","Data":"3c270bbe468521afe05881e7651a4a2afbba9de0e8901c9c24be3d0d1ca29205"} Jan 20 20:57:50 crc kubenswrapper[4948]: I0120 20:57:50.249392 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 20:57:50 crc kubenswrapper[4948]: I0120 20:57:50.249433 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 20:57:51 crc kubenswrapper[4948]: I0120 20:57:51.201501 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psjw9" event={"ID":"906d4c8a-4ef3-46ff-9897-97c14fb672bc","Type":"ContainerStarted","Data":"827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556"} Jan 20 20:57:52 crc kubenswrapper[4948]: I0120 20:57:52.222485 4948 generic.go:334] "Generic (PLEG): container finished" podID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerID="827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556" exitCode=0 Jan 20 20:57:52 crc kubenswrapper[4948]: I0120 20:57:52.222765 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psjw9" event={"ID":"906d4c8a-4ef3-46ff-9897-97c14fb672bc","Type":"ContainerDied","Data":"827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556"} Jan 20 20:57:53 crc kubenswrapper[4948]: I0120 20:57:53.233834 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psjw9" event={"ID":"906d4c8a-4ef3-46ff-9897-97c14fb672bc","Type":"ContainerStarted","Data":"f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df"} Jan 20 
20:57:53 crc kubenswrapper[4948]: I0120 20:57:53.284203 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-psjw9" podStartSLOduration=2.726170836 podStartE2EDuration="5.284182146s" podCreationTimestamp="2026-01-20 20:57:48 +0000 UTC" firstStartedPulling="2026-01-20 20:57:50.132740184 +0000 UTC m=+4098.083465173" lastFinishedPulling="2026-01-20 20:57:52.690751524 +0000 UTC m=+4100.641476483" observedRunningTime="2026-01-20 20:57:53.273667228 +0000 UTC m=+4101.224392197" watchObservedRunningTime="2026-01-20 20:57:53.284182146 +0000 UTC m=+4101.234907115" Jan 20 20:57:58 crc kubenswrapper[4948]: I0120 20:57:58.616264 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:58 crc kubenswrapper[4948]: I0120 20:57:58.617311 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:58 crc kubenswrapper[4948]: I0120 20:57:58.684438 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:59 crc kubenswrapper[4948]: I0120 20:57:59.363884 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:57:59 crc kubenswrapper[4948]: I0120 20:57:59.426066 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-psjw9"] Jan 20 20:58:01 crc kubenswrapper[4948]: I0120 20:58:01.313988 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-psjw9" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerName="registry-server" containerID="cri-o://f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df" gracePeriod=2 Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.293025 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-psjw9" Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.319630 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-utilities\") pod \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.319696 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mklcj\" (UniqueName: \"kubernetes.io/projected/906d4c8a-4ef3-46ff-9897-97c14fb672bc-kube-api-access-mklcj\") pod \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.319781 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-catalog-content\") pod \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\" (UID: \"906d4c8a-4ef3-46ff-9897-97c14fb672bc\") " Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.326830 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-utilities" (OuterVolumeSpecName: "utilities") pod "906d4c8a-4ef3-46ff-9897-97c14fb672bc" (UID: "906d4c8a-4ef3-46ff-9897-97c14fb672bc"). 
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.336356 4948 generic.go:334] "Generic (PLEG): container finished" podID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerID="f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df" exitCode=0
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.336402 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psjw9" event={"ID":"906d4c8a-4ef3-46ff-9897-97c14fb672bc","Type":"ContainerDied","Data":"f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df"}
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.336429 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-psjw9" event={"ID":"906d4c8a-4ef3-46ff-9897-97c14fb672bc","Type":"ContainerDied","Data":"3c270bbe468521afe05881e7651a4a2afbba9de0e8901c9c24be3d0d1ca29205"}
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.336447 4948 scope.go:117] "RemoveContainer" containerID="f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.336593 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-psjw9"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.358535 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/906d4c8a-4ef3-46ff-9897-97c14fb672bc-kube-api-access-mklcj" (OuterVolumeSpecName: "kube-api-access-mklcj") pod "906d4c8a-4ef3-46ff-9897-97c14fb672bc" (UID: "906d4c8a-4ef3-46ff-9897-97c14fb672bc"). InnerVolumeSpecName "kube-api-access-mklcj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.387820 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "906d4c8a-4ef3-46ff-9897-97c14fb672bc" (UID: "906d4c8a-4ef3-46ff-9897-97c14fb672bc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.404472 4948 scope.go:117] "RemoveContainer" containerID="827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.421425 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.421454 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mklcj\" (UniqueName: \"kubernetes.io/projected/906d4c8a-4ef3-46ff-9897-97c14fb672bc-kube-api-access-mklcj\") on node \"crc\" DevicePath \"\""
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.421465 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/906d4c8a-4ef3-46ff-9897-97c14fb672bc-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.424829 4948 scope.go:117] "RemoveContainer" containerID="b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.466992 4948 scope.go:117] "RemoveContainer" containerID="f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df"
Jan 20 20:58:02 crc kubenswrapper[4948]: E0120 20:58:02.467979 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df\": container with ID starting with f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df not found: ID does not exist" containerID="f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.468023 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df"} err="failed to get container status \"f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df\": rpc error: code = NotFound desc = could not find container \"f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df\": container with ID starting with f5e3b34a4b71fd22b61891dc124ac296168ae98569cbbad4fdfec6e3e96532df not found: ID does not exist"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.468057 4948 scope.go:117] "RemoveContainer" containerID="827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556"
Jan 20 20:58:02 crc kubenswrapper[4948]: E0120 20:58:02.468544 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556\": container with ID starting with 827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556 not found: ID does not exist" containerID="827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.468581 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556"} err="failed to get container status \"827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556\": rpc error: code = NotFound desc = could not find container \"827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556\": container with ID starting with 827aaae75af6421372d0b1f7563d79ec6ae389e035dbfe678849341d9251b556 not found: ID does not exist"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.468609 4948 scope.go:117] "RemoveContainer" containerID="b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c"
Jan 20 20:58:02 crc kubenswrapper[4948]: E0120 20:58:02.469035 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c\": container with ID starting with b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c not found: ID does not exist" containerID="b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.469067 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c"} err="failed to get container status \"b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c\": rpc error: code = NotFound desc = could not find container \"b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c\": container with ID starting with b1f25a547b69df602dd39c5adb52fd93be39ea350ea928959b89fcc52c500d6c not found: ID does not exist"
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.689805 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-psjw9"]
Jan 20 20:58:02 crc kubenswrapper[4948]: I0120 20:58:02.697207 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-psjw9"]
Jan 20 20:58:04 crc kubenswrapper[4948]: I0120 20:58:04.581095 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" path="/var/lib/kubelet/pods/906d4c8a-4ef3-46ff-9897-97c14fb672bc/volumes"
Jan 20 20:58:20 crc kubenswrapper[4948]: I0120 20:58:20.249959 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 20:58:20 crc kubenswrapper[4948]: I0120 20:58:20.250447 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 20:58:20 crc kubenswrapper[4948]: I0120 20:58:20.250500 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv"
Jan 20 20:58:20 crc kubenswrapper[4948]: I0120 20:58:20.251395 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1caecefd109167b1b68675aec6dac8de142f275ff45d4bedbf7d74196eb27169"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 20 20:58:20 crc kubenswrapper[4948]: I0120 20:58:20.251470 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://1caecefd109167b1b68675aec6dac8de142f275ff45d4bedbf7d74196eb27169" gracePeriod=600
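[editor's note] The liveness entries above show the prober hitting http://127.0.0.1:8798/health, getting connection refused, and finally killing the container with a 600s grace period. For orientation, a hypothetical Go reconstruction of such a probe using the corev1 types the kubelet consumes; only the host, port, and path appear in the log, every other field is an assumption:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Hypothetical sketch: the machine-config-daemon pod spec is not part of
	// this log. Host, port, and path come from the probe output above;
	// PeriodSeconds is guessed from the ~30s spacing of the failure entries.
	probe := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1",
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
		PeriodSeconds: 30,
	}
	fmt.Printf("GET http://%s:%d%s every %ds\n",
		probe.HTTPGet.Host, probe.HTTPGet.Port.IntValue(), probe.HTTPGet.Path, probe.PeriodSeconds)
}
```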
period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://1caecefd109167b1b68675aec6dac8de142f275ff45d4bedbf7d74196eb27169" gracePeriod=600 Jan 20 20:58:20 crc kubenswrapper[4948]: I0120 20:58:20.500741 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="1caecefd109167b1b68675aec6dac8de142f275ff45d4bedbf7d74196eb27169" exitCode=0 Jan 20 20:58:20 crc kubenswrapper[4948]: I0120 20:58:20.500904 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"1caecefd109167b1b68675aec6dac8de142f275ff45d4bedbf7d74196eb27169"} Jan 20 20:58:20 crc kubenswrapper[4948]: I0120 20:58:20.501166 4948 scope.go:117] "RemoveContainer" containerID="b97e4ca454f051d7ad5efdc22b948259afdd62a5d93778863c6e3923894b246d" Jan 20 20:58:21 crc kubenswrapper[4948]: I0120 20:58:21.514062 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerStarted","Data":"82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a"} Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.118569 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jpgvm"] Jan 20 20:58:40 crc kubenswrapper[4948]: E0120 20:58:40.119558 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerName="extract-content" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.119576 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerName="extract-content" Jan 20 20:58:40 crc kubenswrapper[4948]: E0120 20:58:40.119603 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerName="extract-utilities" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.119611 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerName="extract-utilities" Jan 20 20:58:40 crc kubenswrapper[4948]: E0120 20:58:40.119624 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerName="registry-server" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.119630 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerName="registry-server" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.119871 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="906d4c8a-4ef3-46ff-9897-97c14fb672bc" containerName="registry-server" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.121611 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.140133 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpgvm"] Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.196138 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgmgd\" (UniqueName: \"kubernetes.io/projected/7fb2a943-1eca-4bf7-8132-86deaadc1eab-kube-api-access-pgmgd\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.197214 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-catalog-content\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.197290 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-utilities\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.299526 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-catalog-content\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.299886 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-utilities\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.300086 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-catalog-content\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.300200 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgmgd\" (UniqueName: \"kubernetes.io/projected/7fb2a943-1eca-4bf7-8132-86deaadc1eab-kube-api-access-pgmgd\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.300225 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-utilities\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.713760 4948 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-pgmgd\" (UniqueName: \"kubernetes.io/projected/7fb2a943-1eca-4bf7-8132-86deaadc1eab-kube-api-access-pgmgd\") pod \"redhat-marketplace-jpgvm\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") " pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:40 crc kubenswrapper[4948]: I0120 20:58:40.746552 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:41 crc kubenswrapper[4948]: I0120 20:58:41.310859 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpgvm"] Jan 20 20:58:41 crc kubenswrapper[4948]: I0120 20:58:41.755590 4948 generic.go:334] "Generic (PLEG): container finished" podID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerID="0812dc6808a82c8fdefc58d8966857d125cc2eae0170ad10c6a559b1d71d2896" exitCode=0 Jan 20 20:58:41 crc kubenswrapper[4948]: I0120 20:58:41.755761 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpgvm" event={"ID":"7fb2a943-1eca-4bf7-8132-86deaadc1eab","Type":"ContainerDied","Data":"0812dc6808a82c8fdefc58d8966857d125cc2eae0170ad10c6a559b1d71d2896"} Jan 20 20:58:41 crc kubenswrapper[4948]: I0120 20:58:41.755986 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpgvm" event={"ID":"7fb2a943-1eca-4bf7-8132-86deaadc1eab","Type":"ContainerStarted","Data":"879ac908a0232df8e91a2f60e9112372bc3e47b179c1a5035217ec7481b9cdc4"} Jan 20 20:58:41 crc kubenswrapper[4948]: I0120 20:58:41.758221 4948 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 20 20:58:42 crc kubenswrapper[4948]: I0120 20:58:42.767165 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpgvm" event={"ID":"7fb2a943-1eca-4bf7-8132-86deaadc1eab","Type":"ContainerStarted","Data":"43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585"} Jan 20 20:58:43 crc kubenswrapper[4948]: I0120 20:58:43.780345 4948 generic.go:334] "Generic (PLEG): container finished" podID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerID="43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585" exitCode=0 Jan 20 20:58:43 crc kubenswrapper[4948]: I0120 20:58:43.780388 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpgvm" event={"ID":"7fb2a943-1eca-4bf7-8132-86deaadc1eab","Type":"ContainerDied","Data":"43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585"} Jan 20 20:58:44 crc kubenswrapper[4948]: I0120 20:58:44.805154 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpgvm" event={"ID":"7fb2a943-1eca-4bf7-8132-86deaadc1eab","Type":"ContainerStarted","Data":"038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965"} Jan 20 20:58:44 crc kubenswrapper[4948]: I0120 20:58:44.851546 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jpgvm" podStartSLOduration=2.315463894 podStartE2EDuration="4.851523382s" podCreationTimestamp="2026-01-20 20:58:40 +0000 UTC" firstStartedPulling="2026-01-20 20:58:41.757939539 +0000 UTC m=+4149.708664508" lastFinishedPulling="2026-01-20 20:58:44.293999027 +0000 UTC m=+4152.244723996" observedRunningTime="2026-01-20 20:58:44.838576285 +0000 UTC m=+4152.789301264" watchObservedRunningTime="2026-01-20 20:58:44.851523382 +0000 UTC 
Jan 20 20:58:50 crc kubenswrapper[4948]: I0120 20:58:50.747363 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jpgvm"
Jan 20 20:58:50 crc kubenswrapper[4948]: I0120 20:58:50.748146 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jpgvm"
Jan 20 20:58:50 crc kubenswrapper[4948]: I0120 20:58:50.820104 4948 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jpgvm"
Jan 20 20:58:50 crc kubenswrapper[4948]: I0120 20:58:50.915037 4948 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jpgvm"
Jan 20 20:58:51 crc kubenswrapper[4948]: I0120 20:58:51.073938 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpgvm"]
Jan 20 20:58:52 crc kubenswrapper[4948]: I0120 20:58:52.875284 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jpgvm" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerName="registry-server" containerID="cri-o://038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965" gracePeriod=2
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.875823 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpgvm"
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.884126 4948 generic.go:334] "Generic (PLEG): container finished" podID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerID="038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965" exitCode=0
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.884163 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpgvm" event={"ID":"7fb2a943-1eca-4bf7-8132-86deaadc1eab","Type":"ContainerDied","Data":"038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965"}
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.884184 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpgvm" event={"ID":"7fb2a943-1eca-4bf7-8132-86deaadc1eab","Type":"ContainerDied","Data":"879ac908a0232df8e91a2f60e9112372bc3e47b179c1a5035217ec7481b9cdc4"}
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.884188 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpgvm"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpgvm" Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.884200 4948 scope.go:117] "RemoveContainer" containerID="038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965" Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.910221 4948 scope.go:117] "RemoveContainer" containerID="43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585" Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.949250 4948 scope.go:117] "RemoveContainer" containerID="0812dc6808a82c8fdefc58d8966857d125cc2eae0170ad10c6a559b1d71d2896" Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.974665 4948 scope.go:117] "RemoveContainer" containerID="038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965" Jan 20 20:58:53 crc kubenswrapper[4948]: E0120 20:58:53.975390 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965\": container with ID starting with 038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965 not found: ID does not exist" containerID="038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965" Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.975423 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965"} err="failed to get container status \"038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965\": rpc error: code = NotFound desc = could not find container \"038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965\": container with ID starting with 038349f96870e09a9d40257c5aefdcb0bb68c6ed29aa141e33f1ff85487ec965 not found: ID does not exist" Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.975462 4948 scope.go:117] "RemoveContainer" containerID="43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585" Jan 20 20:58:53 crc kubenswrapper[4948]: E0120 20:58:53.976003 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585\": container with ID starting with 43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585 not found: ID does not exist" containerID="43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585" Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.976031 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585"} err="failed to get container status \"43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585\": rpc error: code = NotFound desc = could not find container \"43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585\": container with ID starting with 43b9e8669462981ccadb109e552a2c77679559886b9ae8bb59008bc8eb133585 not found: ID does not exist" Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.976047 4948 scope.go:117] "RemoveContainer" containerID="0812dc6808a82c8fdefc58d8966857d125cc2eae0170ad10c6a559b1d71d2896" Jan 20 20:58:53 crc kubenswrapper[4948]: E0120 20:58:53.976443 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0812dc6808a82c8fdefc58d8966857d125cc2eae0170ad10c6a559b1d71d2896\": container with ID starting 
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.988433 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-catalog-content\") pod \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") "
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.988626 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-utilities\") pod \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") "
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.989297 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgmgd\" (UniqueName: \"kubernetes.io/projected/7fb2a943-1eca-4bf7-8132-86deaadc1eab-kube-api-access-pgmgd\") pod \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\" (UID: \"7fb2a943-1eca-4bf7-8132-86deaadc1eab\") "
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.989813 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-utilities" (OuterVolumeSpecName: "utilities") pod "7fb2a943-1eca-4bf7-8132-86deaadc1eab" (UID: "7fb2a943-1eca-4bf7-8132-86deaadc1eab"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:58:53 crc kubenswrapper[4948]: I0120 20:58:53.994299 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fb2a943-1eca-4bf7-8132-86deaadc1eab-kube-api-access-pgmgd" (OuterVolumeSpecName: "kube-api-access-pgmgd") pod "7fb2a943-1eca-4bf7-8132-86deaadc1eab" (UID: "7fb2a943-1eca-4bf7-8132-86deaadc1eab"). InnerVolumeSpecName "kube-api-access-pgmgd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 20 20:58:54 crc kubenswrapper[4948]: I0120 20:58:54.013199 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7fb2a943-1eca-4bf7-8132-86deaadc1eab" (UID: "7fb2a943-1eca-4bf7-8132-86deaadc1eab"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 20 20:58:54 crc kubenswrapper[4948]: I0120 20:58:54.091558 4948 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-utilities\") on node \"crc\" DevicePath \"\""
Jan 20 20:58:54 crc kubenswrapper[4948]: I0120 20:58:54.091593 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgmgd\" (UniqueName: \"kubernetes.io/projected/7fb2a943-1eca-4bf7-8132-86deaadc1eab-kube-api-access-pgmgd\") on node \"crc\" DevicePath \"\""
Jan 20 20:58:54 crc kubenswrapper[4948]: I0120 20:58:54.091605 4948 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7fb2a943-1eca-4bf7-8132-86deaadc1eab-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 20 20:58:54 crc kubenswrapper[4948]: I0120 20:58:54.222585 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpgvm"]
Jan 20 20:58:54 crc kubenswrapper[4948]: I0120 20:58:54.230589 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpgvm"]
Jan 20 20:58:54 crc kubenswrapper[4948]: I0120 20:58:54.583548 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" path="/var/lib/kubelet/pods/7fb2a943-1eca-4bf7-8132-86deaadc1eab/volumes"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.187397 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"]
Jan 20 21:00:00 crc kubenswrapper[4948]: E0120 21:00:00.188578 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerName="registry-server"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.188598 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerName="registry-server"
Jan 20 21:00:00 crc kubenswrapper[4948]: E0120 21:00:00.188612 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerName="extract-content"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.188620 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerName="extract-content"
Jan 20 21:00:00 crc kubenswrapper[4948]: E0120 21:00:00.188637 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerName="extract-utilities"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.188676 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerName="extract-utilities"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.188960 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="7fb2a943-1eca-4bf7-8132-86deaadc1eab" containerName="registry-server"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.189796 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"
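[editor's note] The Job suffix in collect-profiles-29482380-nv9v7 is deterministic: the CronJob controller names each Job after the scheduled run time expressed in minutes since the Unix epoch, and the 21:00:00 UTC run on this date works out to exactly that suffix. A quick check in Go:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Scheduled run time taken from the log timestamps above.
	scheduled := time.Date(2026, time.January, 20, 21, 0, 0, 0, time.UTC)
	// Job name = "<cronjob>-<minutes since the Unix epoch>".
	fmt.Printf("collect-profiles-%d\n", scheduled.Unix()/60) // collect-profiles-29482380
}
```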
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.192504 4948 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.214105 4948 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.217636 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"] Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.322487 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-config-volume\") pod \"collect-profiles-29482380-nv9v7\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.322538 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-secret-volume\") pod \"collect-profiles-29482380-nv9v7\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.322594 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qnp4\" (UniqueName: \"kubernetes.io/projected/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-kube-api-access-6qnp4\") pod \"collect-profiles-29482380-nv9v7\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.424475 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-config-volume\") pod \"collect-profiles-29482380-nv9v7\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.424520 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-secret-volume\") pod \"collect-profiles-29482380-nv9v7\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.424585 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qnp4\" (UniqueName: \"kubernetes.io/projected/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-kube-api-access-6qnp4\") pod \"collect-profiles-29482380-nv9v7\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.425742 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-config-volume\") pod 
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.432312 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-secret-volume\") pod \"collect-profiles-29482380-nv9v7\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.442342 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qnp4\" (UniqueName: \"kubernetes.io/projected/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-kube-api-access-6qnp4\") pod \"collect-profiles-29482380-nv9v7\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.512633 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"
Jan 20 21:00:00 crc kubenswrapper[4948]: I0120 21:00:00.995090 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"]
Jan 20 21:00:01 crc kubenswrapper[4948]: I0120 21:00:01.535366 4948 generic.go:334] "Generic (PLEG): container finished" podID="1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3" containerID="9c2853c61e57fd76c1d053008fe34d5764073b7899c53fb3f4dd6313531c196a" exitCode=0
Jan 20 21:00:01 crc kubenswrapper[4948]: I0120 21:00:01.535473 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" event={"ID":"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3","Type":"ContainerDied","Data":"9c2853c61e57fd76c1d053008fe34d5764073b7899c53fb3f4dd6313531c196a"}
Jan 20 21:00:01 crc kubenswrapper[4948]: I0120 21:00:01.535734 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" event={"ID":"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3","Type":"ContainerStarted","Data":"dc1c1e74b6dbc5125c8474abbb022efb2e1cf60805524ce66e4254c3fd54df0b"}
Jan 20 21:00:02 crc kubenswrapper[4948]: I0120 21:00:02.935058 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" Jan 20 21:00:02 crc kubenswrapper[4948]: I0120 21:00:02.980974 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-secret-volume\") pod \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " Jan 20 21:00:02 crc kubenswrapper[4948]: I0120 21:00:02.981268 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qnp4\" (UniqueName: \"kubernetes.io/projected/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-kube-api-access-6qnp4\") pod \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " Jan 20 21:00:02 crc kubenswrapper[4948]: I0120 21:00:02.981299 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-config-volume\") pod \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\" (UID: \"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3\") " Jan 20 21:00:02 crc kubenswrapper[4948]: I0120 21:00:02.982276 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-config-volume" (OuterVolumeSpecName: "config-volume") pod "1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3" (UID: "1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 20 21:00:03 crc kubenswrapper[4948]: I0120 21:00:03.004627 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3" (UID: "1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 21:00:03 crc kubenswrapper[4948]: I0120 21:00:03.005117 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-kube-api-access-6qnp4" (OuterVolumeSpecName: "kube-api-access-6qnp4") pod "1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3" (UID: "1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3"). InnerVolumeSpecName "kube-api-access-6qnp4". 
Jan 20 21:00:03 crc kubenswrapper[4948]: I0120 21:00:03.083769 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qnp4\" (UniqueName: \"kubernetes.io/projected/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-kube-api-access-6qnp4\") on node \"crc\" DevicePath \"\""
Jan 20 21:00:03 crc kubenswrapper[4948]: I0120 21:00:03.083802 4948 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-config-volume\") on node \"crc\" DevicePath \"\""
Jan 20 21:00:03 crc kubenswrapper[4948]: I0120 21:00:03.083811 4948 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 20 21:00:03 crc kubenswrapper[4948]: I0120 21:00:03.556789 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7" event={"ID":"1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3","Type":"ContainerDied","Data":"dc1c1e74b6dbc5125c8474abbb022efb2e1cf60805524ce66e4254c3fd54df0b"}
Jan 20 21:00:03 crc kubenswrapper[4948]: I0120 21:00:03.556828 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc1c1e74b6dbc5125c8474abbb022efb2e1cf60805524ce66e4254c3fd54df0b"
Jan 20 21:00:03 crc kubenswrapper[4948]: I0120 21:00:03.556878 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29482380-nv9v7"
Jan 20 21:00:04 crc kubenswrapper[4948]: I0120 21:00:04.033491 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl"]
Jan 20 21:00:04 crc kubenswrapper[4948]: I0120 21:00:04.045939 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29482335-d94gl"]
Jan 20 21:00:04 crc kubenswrapper[4948]: I0120 21:00:04.582983 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41464c5c-9486-4ec9-bb98-ff7d1edf9f29" path="/var/lib/kubelet/pods/41464c5c-9486-4ec9-bb98-ff7d1edf9f29/volumes"
Jan 20 21:00:20 crc kubenswrapper[4948]: I0120 21:00:20.249758 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 20 21:00:20 crc kubenswrapper[4948]: I0120 21:00:20.250489 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 20 21:00:34 crc kubenswrapper[4948]: I0120 21:00:34.883506 4948 generic.go:334] "Generic (PLEG): container finished" podID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerID="29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd" exitCode=0
Jan 20 21:00:34 crc kubenswrapper[4948]: I0120 21:00:34.883582 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pz7tb/must-gather-749j8" event={"ID":"c84f95ac-5d9f-467b-90fa-fa7da9b2c851","Type":"ContainerDied","Data":"29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd"}
event={"ID":"c84f95ac-5d9f-467b-90fa-fa7da9b2c851","Type":"ContainerDied","Data":"29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd"} Jan 20 21:00:34 crc kubenswrapper[4948]: I0120 21:00:34.884643 4948 scope.go:117] "RemoveContainer" containerID="29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd" Jan 20 21:00:34 crc kubenswrapper[4948]: I0120 21:00:34.993861 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pz7tb_must-gather-749j8_c84f95ac-5d9f-467b-90fa-fa7da9b2c851/gather/0.log" Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.070823 4948 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pz7tb/must-gather-749j8"] Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.072555 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-pz7tb/must-gather-749j8" podUID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerName="copy" containerID="cri-o://ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27" gracePeriod=2 Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.089635 4948 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pz7tb/must-gather-749j8"] Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.781442 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pz7tb_must-gather-749j8_c84f95ac-5d9f-467b-90fa-fa7da9b2c851/copy/0.log" Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.782208 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.869726 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffsk9\" (UniqueName: \"kubernetes.io/projected/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-kube-api-access-ffsk9\") pod \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\" (UID: \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\") " Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.869825 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-must-gather-output\") pod \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\" (UID: \"c84f95ac-5d9f-467b-90fa-fa7da9b2c851\") " Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.893943 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-kube-api-access-ffsk9" (OuterVolumeSpecName: "kube-api-access-ffsk9") pod "c84f95ac-5d9f-467b-90fa-fa7da9b2c851" (UID: "c84f95ac-5d9f-467b-90fa-fa7da9b2c851"). InnerVolumeSpecName "kube-api-access-ffsk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 21:00:45 crc kubenswrapper[4948]: I0120 21:00:45.972226 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffsk9\" (UniqueName: \"kubernetes.io/projected/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-kube-api-access-ffsk9\") on node \"crc\" DevicePath \"\"" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.047986 4948 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pz7tb_must-gather-749j8_c84f95ac-5d9f-467b-90fa-fa7da9b2c851/copy/0.log" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.048800 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pz7tb/must-gather-749j8" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.048973 4948 scope.go:117] "RemoveContainer" containerID="ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.048692 4948 generic.go:334] "Generic (PLEG): container finished" podID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerID="ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27" exitCode=143 Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.051462 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "c84f95ac-5d9f-467b-90fa-fa7da9b2c851" (UID: "c84f95ac-5d9f-467b-90fa-fa7da9b2c851"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.070976 4948 scope.go:117] "RemoveContainer" containerID="29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.078065 4948 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/c84f95ac-5d9f-467b-90fa-fa7da9b2c851-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.162346 4948 scope.go:117] "RemoveContainer" containerID="ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27" Jan 20 21:00:46 crc kubenswrapper[4948]: E0120 21:00:46.163537 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27\": container with ID starting with ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27 not found: ID does not exist" containerID="ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.163576 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27"} err="failed to get container status \"ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27\": rpc error: code = NotFound desc = could not find container \"ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27\": container with ID starting with ddfa3e62ae800091b134b1df60bae0af878aaccf89aa3a3f1b811db5f824ea27 not found: ID does not exist" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.163620 4948 scope.go:117] "RemoveContainer" containerID="29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd" Jan 20 21:00:46 crc kubenswrapper[4948]: E0120 21:00:46.163938 4948 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd\": container with ID starting with 29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd not found: ID does not exist" containerID="29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.164000 4948 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd"} err="failed to get 
container status \"29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd\": rpc error: code = NotFound desc = could not find container \"29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd\": container with ID starting with 29fcc4b5797ef0a4c1e717b88a58500bbdf7d42186377b8ad813f31d4df707dd not found: ID does not exist" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.584305 4948 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" path="/var/lib/kubelet/pods/c84f95ac-5d9f-467b-90fa-fa7da9b2c851/volumes" Jan 20 21:00:46 crc kubenswrapper[4948]: I0120 21:00:46.685038 4948 scope.go:117] "RemoveContainer" containerID="487ed09f2dd4026ddbfc4d3d5bc5512ecc7f447a233eedc4cf433bb69cfa10ce" Jan 20 21:00:50 crc kubenswrapper[4948]: I0120 21:00:50.249537 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 21:00:50 crc kubenswrapper[4948]: I0120 21:00:50.250950 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.174280 4948 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29482381-wrrrx"] Jan 20 21:01:00 crc kubenswrapper[4948]: E0120 21:01:00.175315 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3" containerName="collect-profiles" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.175333 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3" containerName="collect-profiles" Jan 20 21:01:00 crc kubenswrapper[4948]: E0120 21:01:00.175364 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerName="gather" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.175373 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerName="gather" Jan 20 21:01:00 crc kubenswrapper[4948]: E0120 21:01:00.175390 4948 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerName="copy" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.175401 4948 state_mem.go:107] "Deleted CPUSet assignment" podUID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerName="copy" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.175622 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b7badb5-fd98-45ab-bd65-9fa11fb0b7a3" containerName="collect-profiles" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.175648 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerName="gather" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.175668 4948 memory_manager.go:354] "RemoveStaleState removing state" podUID="c84f95ac-5d9f-467b-90fa-fa7da9b2c851" containerName="copy" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.176439 4948 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.195348 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29482381-wrrrx"] Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.364801 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzmhh\" (UniqueName: \"kubernetes.io/projected/4b7584d6-c38a-4158-8851-85153321d8cf-kube-api-access-kzmhh\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.364877 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-combined-ca-bundle\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.365013 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-config-data\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.365188 4948 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-fernet-keys\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.467123 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzmhh\" (UniqueName: \"kubernetes.io/projected/4b7584d6-c38a-4158-8851-85153321d8cf-kube-api-access-kzmhh\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.467223 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-combined-ca-bundle\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.467281 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-config-data\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.467360 4948 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-fernet-keys\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.475763 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-fernet-keys\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.482754 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-config-data\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.490085 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzmhh\" (UniqueName: \"kubernetes.io/projected/4b7584d6-c38a-4158-8851-85153321d8cf-kube-api-access-kzmhh\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.490130 4948 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-combined-ca-bundle\") pod \"keystone-cron-29482381-wrrrx\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:00 crc kubenswrapper[4948]: I0120 21:01:00.502959 4948 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:01 crc kubenswrapper[4948]: I0120 21:01:01.018372 4948 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29482381-wrrrx"] Jan 20 21:01:01 crc kubenswrapper[4948]: W0120 21:01:01.020973 4948 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b7584d6_c38a_4158_8851_85153321d8cf.slice/crio-3d1ec002c94e15720831038e45e4499bd831ae0a7ca24b6ec2f2d8aa6ea306b2 WatchSource:0}: Error finding container 3d1ec002c94e15720831038e45e4499bd831ae0a7ca24b6ec2f2d8aa6ea306b2: Status 404 returned error can't find the container with id 3d1ec002c94e15720831038e45e4499bd831ae0a7ca24b6ec2f2d8aa6ea306b2 Jan 20 21:01:01 crc kubenswrapper[4948]: I0120 21:01:01.230563 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482381-wrrrx" event={"ID":"4b7584d6-c38a-4158-8851-85153321d8cf","Type":"ContainerStarted","Data":"3d1ec002c94e15720831038e45e4499bd831ae0a7ca24b6ec2f2d8aa6ea306b2"} Jan 20 21:01:02 crc kubenswrapper[4948]: I0120 21:01:02.248163 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482381-wrrrx" event={"ID":"4b7584d6-c38a-4158-8851-85153321d8cf","Type":"ContainerStarted","Data":"0ce558a0d6382a36657802588d4ec803d8fa906590ac433ff58c7f7a8d733c37"} Jan 20 21:01:02 crc kubenswrapper[4948]: I0120 21:01:02.272933 4948 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29482381-wrrrx" podStartSLOduration=2.272907462 podStartE2EDuration="2.272907462s" podCreationTimestamp="2026-01-20 21:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-20 21:01:02.268833607 +0000 UTC m=+4290.219558616" watchObservedRunningTime="2026-01-20 21:01:02.272907462 +0000 UTC m=+4290.223632471" Jan 20 21:01:04 crc kubenswrapper[4948]: I0120 21:01:04.275595 4948 
generic.go:334] "Generic (PLEG): container finished" podID="4b7584d6-c38a-4158-8851-85153321d8cf" containerID="0ce558a0d6382a36657802588d4ec803d8fa906590ac433ff58c7f7a8d733c37" exitCode=0 Jan 20 21:01:04 crc kubenswrapper[4948]: I0120 21:01:04.275723 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482381-wrrrx" event={"ID":"4b7584d6-c38a-4158-8851-85153321d8cf","Type":"ContainerDied","Data":"0ce558a0d6382a36657802588d4ec803d8fa906590ac433ff58c7f7a8d733c37"} Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.602312 4948 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.823316 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-combined-ca-bundle\") pod \"4b7584d6-c38a-4158-8851-85153321d8cf\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.824063 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzmhh\" (UniqueName: \"kubernetes.io/projected/4b7584d6-c38a-4158-8851-85153321d8cf-kube-api-access-kzmhh\") pod \"4b7584d6-c38a-4158-8851-85153321d8cf\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.824126 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-config-data\") pod \"4b7584d6-c38a-4158-8851-85153321d8cf\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.824263 4948 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-fernet-keys\") pod \"4b7584d6-c38a-4158-8851-85153321d8cf\" (UID: \"4b7584d6-c38a-4158-8851-85153321d8cf\") " Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.829837 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4b7584d6-c38a-4158-8851-85153321d8cf" (UID: "4b7584d6-c38a-4158-8851-85153321d8cf"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.830365 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b7584d6-c38a-4158-8851-85153321d8cf-kube-api-access-kzmhh" (OuterVolumeSpecName: "kube-api-access-kzmhh") pod "4b7584d6-c38a-4158-8851-85153321d8cf" (UID: "4b7584d6-c38a-4158-8851-85153321d8cf"). InnerVolumeSpecName "kube-api-access-kzmhh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.865887 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b7584d6-c38a-4158-8851-85153321d8cf" (UID: "4b7584d6-c38a-4158-8851-85153321d8cf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.894333 4948 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-config-data" (OuterVolumeSpecName: "config-data") pod "4b7584d6-c38a-4158-8851-85153321d8cf" (UID: "4b7584d6-c38a-4158-8851-85153321d8cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.925811 4948 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.925839 4948 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.925852 4948 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzmhh\" (UniqueName: \"kubernetes.io/projected/4b7584d6-c38a-4158-8851-85153321d8cf-kube-api-access-kzmhh\") on node \"crc\" DevicePath \"\"" Jan 20 21:01:05 crc kubenswrapper[4948]: I0120 21:01:05.925861 4948 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b7584d6-c38a-4158-8851-85153321d8cf-config-data\") on node \"crc\" DevicePath \"\"" Jan 20 21:01:06 crc kubenswrapper[4948]: I0120 21:01:06.296808 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29482381-wrrrx" event={"ID":"4b7584d6-c38a-4158-8851-85153321d8cf","Type":"ContainerDied","Data":"3d1ec002c94e15720831038e45e4499bd831ae0a7ca24b6ec2f2d8aa6ea306b2"} Jan 20 21:01:06 crc kubenswrapper[4948]: I0120 21:01:06.296871 4948 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d1ec002c94e15720831038e45e4499bd831ae0a7ca24b6ec2f2d8aa6ea306b2" Jan 20 21:01:06 crc kubenswrapper[4948]: I0120 21:01:06.296967 4948 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29482381-wrrrx" Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.249803 4948 patch_prober.go:28] interesting pod/machine-config-daemon-xg4hv container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.250571 4948 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.250649 4948 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.252476 4948 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a"} pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.252624 4948 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerName="machine-config-daemon" containerID="cri-o://82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" gracePeriod=600 Jan 20 21:01:20 crc kubenswrapper[4948]: E0120 21:01:20.383394 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.445333 4948 generic.go:334] "Generic (PLEG): container finished" podID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" exitCode=0 Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.445396 4948 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" event={"ID":"6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1","Type":"ContainerDied","Data":"82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a"} Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.445438 4948 scope.go:117] "RemoveContainer" containerID="1caecefd109167b1b68675aec6dac8de142f275ff45d4bedbf7d74196eb27169" Jan 20 21:01:20 crc kubenswrapper[4948]: I0120 21:01:20.460059 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:01:20 crc kubenswrapper[4948]: E0120 21:01:20.460750 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:01:31 crc kubenswrapper[4948]: I0120 21:01:31.570104 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:01:31 crc kubenswrapper[4948]: E0120 21:01:31.571130 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:01:43 crc kubenswrapper[4948]: I0120 21:01:43.570757 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:01:43 crc kubenswrapper[4948]: E0120 21:01:43.571398 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:01:54 crc kubenswrapper[4948]: I0120 21:01:54.570537 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:01:54 crc kubenswrapper[4948]: E0120 21:01:54.572083 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:02:09 crc kubenswrapper[4948]: I0120 21:02:09.572181 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:02:09 crc kubenswrapper[4948]: E0120 21:02:09.573468 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:02:20 crc kubenswrapper[4948]: I0120 21:02:20.573758 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:02:20 crc kubenswrapper[4948]: E0120 21:02:20.575027 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:02:32 crc kubenswrapper[4948]: I0120 21:02:32.584981 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:02:32 crc kubenswrapper[4948]: E0120 21:02:32.585913 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:02:43 crc kubenswrapper[4948]: I0120 21:02:43.570179 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:02:43 crc kubenswrapper[4948]: E0120 21:02:43.570801 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:02:46 crc kubenswrapper[4948]: I0120 21:02:46.787594 4948 scope.go:117] "RemoveContainer" containerID="7cae468db43544e71f208cac7eb6420c101701efd6667a5150fd5913322dc2e7" Jan 20 21:02:46 crc kubenswrapper[4948]: I0120 21:02:46.810202 4948 scope.go:117] "RemoveContainer" containerID="95d1fd467a8fd013eef2c5dc0273573f8112730bd834ea8feac156436c825140" Jan 20 21:02:46 crc kubenswrapper[4948]: I0120 21:02:46.828462 4948 scope.go:117] "RemoveContainer" containerID="d98d71d1f0e9e4f2395591af47e95ec54f33872460c6190b50ff12f560df5d33" Jan 20 21:02:55 crc kubenswrapper[4948]: I0120 21:02:55.571519 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:02:55 crc kubenswrapper[4948]: E0120 21:02:55.571986 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" Jan 20 21:03:09 crc kubenswrapper[4948]: I0120 21:03:09.570233 4948 scope.go:117] "RemoveContainer" containerID="82d632d5835f651207fc01044298e58f322a1b98f1a0f380d985333143753b9a" Jan 20 21:03:09 crc kubenswrapper[4948]: E0120 21:03:09.570861 4948 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xg4hv_openshift-machine-config-operator(6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1)\"" pod="openshift-machine-config-operator/machine-config-daemon-xg4hv" podUID="6eb22cc4-345e-4db4-8c4d-cfe3318a8ef1" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515133766640024460 0ustar coreroot  